diff --git a/.gitignore b/.gitignore index 2030c7a4d2f8c4..ce2c6348d372d0 100644 --- a/.gitignore +++ b/.gitignore @@ -34,7 +34,6 @@ *.mod.c *.o *.o.* -*.order *.patch *.s *.so @@ -46,6 +45,7 @@ *.xz Module.symvers modules.builtin +modules.order # # Top-level generic files diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index 5f7d7b14fa440d..06d0931119ccc0 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -562,3 +562,13 @@ Description: Umwait control or C0.2 state. The time is an unsigned 32-bit number. Note that a value of zero means there is no limit. Low order two bits must be zero. + +What: /sys/devices/system/cpu/svm +Date: August 2019 +Contact: Linux kernel mailing list + Linux for PowerPC mailing list +Description: Secure Virtual Machine + + If 1, it means the system is using the Protected Execution + Facility in POWER9 and newer processors. i.e., it is a Secure + Virtual Machine. diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 782e9072407b94..d3814789304fac 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -860,6 +860,10 @@ disable_radix [PPC] Disable RADIX MMU mode on POWER9 + disable_tlbie [PPC] + Disable TLBIE instruction. Currently does not work + with KVM, with HASH MMU, or with coherent accelerators. + disable_cpu_apicid= [X86,APIC,SMP] Format: The number of initial APIC ID for the @@ -4641,6 +4645,11 @@ /sys/power/pm_test). Only available when CONFIG_PM_DEBUG is set. Default value is 5. + svm= [PPC] + Format: { on | off | y | n | 1 | 0 } + This parameter controls use of the Protected + Execution Facility on pSeries. + swapaccount=[0|1] [KNL] Enable accounting of swap in memory resource controller if no parameter or 1 is given or disable @@ -5326,3 +5335,22 @@ A hex value specifying bitmask with supplemental xhci host controller quirks. Meaning of each bit can be consulted in header drivers/usb/host/xhci.h. + + xmon [PPC] + Format: { early | on | rw | ro | off } + Controls if xmon debugger is enabled. Default is off. + Passing only "xmon" is equivalent to "xmon=early". + early Call xmon as early as possible on boot; xmon + debugger is called from setup_arch(). + on xmon debugger hooks will be installed so xmon + is only called on a kernel crash. Default mode, + i.e. either "ro" or "rw" mode, is controlled + with CONFIG_XMON_DEFAULT_RO_MODE. + rw xmon debugger hooks will be installed so xmon + is called only on a kernel crash, mode is write, + meaning SPR registers, memory and, other data + can be written using xmon commands. + ro same as "rw" option above but SPR registers, + memory, and other data can't be written using + xmon commands. + off xmon is disabled. 
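As an editorial aside on the new /sys/devices/system/cpu/svm attribute and the svm= parameter documented above, the following is a minimal userspace sketch (not part of the patch) showing how the attribute can be consumed; a missing file, as on older kernels or non-pseries platforms, is treated as "not a Secure Virtual Machine".

.. code-block:: c

    /* Hedged illustration only: detect a Secure Virtual Machine by reading
     * the sysfs attribute described above.  The attribute is absent on
     * kernels/platforms without Protected Execution Facility support. */
    #include <stdio.h>

    static int running_in_svm(void)
    {
            FILE *f = fopen("/sys/devices/system/cpu/svm", "r");
            int val = 0;

            if (!f)
                    return 0;               /* no attribute: not an SVM */
            if (fscanf(f, "%d", &val) != 1)
                    val = 0;
            fclose(f);
            return val == 1;
    }

    int main(void)
    {
            printf("Secure Virtual Machine: %s\n",
                   running_in_svm() ? "yes" : "no");
            return 0;
    }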
diff --git a/Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt b/Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt index 7b8b8eb0191f32..26410fbb85befd 100644 --- a/Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt +++ b/Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt @@ -18,17 +18,19 @@ Clocks: ------- -The Device Tree node representing the AP806 system controller provides -a number of clocks: +The Device Tree node representing the AP806/AP807 system controller +provides a number of clocks: - - 0: clock of CPU cluster 0 - - 1: clock of CPU cluster 1 + - 0: reference clock of CPU cluster 0 + - 1: reference clock of CPU cluster 1 - 2: fixed PLL at 1200 Mhz - 3: MSS clock, derived from the fixed PLL Required properties: - - compatible: must be: "marvell,ap806-clock" + - compatible: must be one of: + * "marvell,ap806-clock" + * "marvell,ap807-clock" - #clock-cells: must be set to 1 Pinctrl: @@ -143,3 +145,33 @@ ap_syscon1: system-controller@6f8000 { #thermal-sensor-cells = <1>; }; }; + +Cluster clocks: +--------------- + +Device Tree Clock bindings for cluster clock of Marvell +AP806/AP807. Each cluster contain up to 2 CPUs running at the same +frequency. + +Required properties: + - compatible: must be one of: + * "marvell,ap806-cpu-clock" + * "marvell,ap807-cpu-clock" +- #clock-cells : should be set to 1. + +- clocks : shall be the input parent clock(s) phandle for the clock + (one per cluster) + +- reg: register range associated with the cluster clocks + +ap_syscon1: system-controller@6f8000 { + compatible = "marvell,armada-ap806-syscon1", "syscon", "simple-mfd"; + reg = <0x6f8000 0x1000>; + + cpu_clk: clock-cpu@278 { + compatible = "marvell,ap806-cpu-clock"; + clocks = <&ap_clk 0>, <&ap_clk 1>; + #clock-cells = <1>; + reg = <0x278 0xa30>; + }; +}; diff --git a/Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt b/Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt index eddde4faef0158..f6d6642d81c075 100644 --- a/Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt +++ b/Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt @@ -48,3 +48,11 @@ avs: avs@11500 { compatible = "marvell,armada-3700-avs", "syscon"; reg = <0x11500 0x40>; } + + +CZ.NIC's Turris Mox SOHO router Device Tree Bindings +---------------------------------------------------- + +Required root node property: + + - compatible: must contain "cznic,turris-mox" diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt index 161e63a6c254d0..ff000ccade78c4 100644 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt +++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt @@ -8,6 +8,7 @@ Required Properties: - compatible: Should be one of: - "mediatek,mt2701-apmixedsys" - "mediatek,mt2712-apmixedsys", "syscon" + - "mediatek,mt6779-apmixedsys", "syscon" - "mediatek,mt6797-apmixedsys" - "mediatek,mt7622-apmixedsys" - "mediatek,mt7623-apmixedsys", "mediatek,mt2701-apmixedsys" diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt index 07c9d813465c9d..e4ca7b7031236f 100644 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt +++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt @@ -7,6 +7,7 @@ Required Properties: - compatible: 
Should be one of: - "mediatek,mt2701-audsys", "syscon" + - "mediatek,mt6779-audio", "syscon" - "mediatek,mt7622-audsys", "syscon" - "mediatek,mt7623-audsys", "mediatek,mt2701-audsys", "syscon" - "mediatek,mt8183-audiosys", "syscon" diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt index d8930f64aa985e..1f4aaa15a37ecd 100644 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt +++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt @@ -6,6 +6,7 @@ The MediaTek camsys controller provides various clocks to the system. Required Properties: - compatible: Should be one of: + - "mediatek,mt6779-camsys", "syscon" - "mediatek,mt8183-camsys", "syscon" - #clock-cells: Must be 1 diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt index e3bc4a1e7a6e46..2b693e343c56d6 100644 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt +++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt @@ -8,6 +8,7 @@ Required Properties: - compatible: Should be one of: - "mediatek,mt2701-imgsys", "syscon" - "mediatek,mt2712-imgsys", "syscon" + - "mediatek,mt6779-imgsys", "syscon" - "mediatek,mt6797-imgsys", "syscon" - "mediatek,mt7623-imgsys", "mediatek,mt2701-imgsys", "syscon" - "mediatek,mt8173-imgsys", "syscon" diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt index a90913988d7ea7..db2f4fd754e7d7 100644 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt +++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt @@ -9,6 +9,7 @@ Required Properties: - compatible: Should be one of: - "mediatek,mt2701-infracfg", "syscon" - "mediatek,mt2712-infracfg", "syscon" + - "mediatek,mt6779-infracfg_ao", "syscon" - "mediatek,mt6797-infracfg", "syscon" - "mediatek,mt7622-infracfg", "syscon" - "mediatek,mt7623-infracfg", "mediatek,mt2701-infracfg", "syscon" diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipesys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipesys.txt new file mode 100644 index 00000000000000..2ce889b023d9b4 --- /dev/null +++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipesys.txt @@ -0,0 +1,22 @@ +Mediatek ipesys controller +============================ + +The Mediatek ipesys controller provides various clocks to the system. + +Required Properties: + +- compatible: Should be one of: + - "mediatek,mt6779-ipesys", "syscon" +- #clock-cells: Must be 1 + +The ipesys controller uses the common clk binding from +Documentation/devicetree/bindings/clock/clock-bindings.txt +The available clocks are defined in dt-bindings/clock/mt*-clk.h. 
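(Editorial illustration, not part of the binding.) For readers coming from the driver side, a hedged consumer sketch may help connect clock-controller bindings such as the ones above to code. It uses only the generic common clock framework API; the "core" clock-names entry is hypothetical and stands in for whatever a consumer's own binding defines.

.. code-block:: c

    /* Illustrative consumer sketch: pick up a clock supplied by a clock
     * controller like the ones documented above via the common clock
     * framework.  The "core" name is a placeholder for this example. */
    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            struct clk *clk;
            int ret;

            clk = devm_clk_get(&pdev->dev, "core");   /* matches clock-names */
            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            ret = clk_prepare_enable(clk);            /* ungate before use */
            if (ret)
                    return ret;

            dev_info(&pdev->dev, "core clock running at %lu Hz\n",
                     clk_get_rate(clk));
            return 0;
    }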
+ +Example: + +ipesys: clock-controller@1b000000 { + compatible = "mediatek,mt6779-ipesys", "syscon"; + reg = <0 0x1b000000 0 0x1000>; + #clock-cells = <1>; +}; diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mfgcfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mfgcfg.txt index 72787e7dd2274c..ad5f9d2f68183d 100644 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mfgcfg.txt +++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mfgcfg.txt @@ -7,6 +7,7 @@ Required Properties: - compatible: Should be one of: - "mediatek,mt2712-mfgcfg", "syscon" + - "mediatek,mt6779-mfgcfg", "syscon" - "mediatek,mt8183-mfgcfg", "syscon" - #clock-cells: Must be 1 diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt index 545eab717c9674..301eefbe161892 100644 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt +++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt @@ -8,6 +8,7 @@ Required Properties: - compatible: Should be one of: - "mediatek,mt2701-mmsys", "syscon" - "mediatek,mt2712-mmsys", "syscon" + - "mediatek,mt6779-mmsys", "syscon" - "mediatek,mt6797-mmsys", "syscon" - "mediatek,mt7623-mmsys", "mediatek,mt2701-mmsys", "syscon" - "mediatek,mt8173-mmsys", "syscon" diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt index 4c7e478117a0c7..ecf027a9003a86 100644 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt +++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt @@ -14,6 +14,7 @@ Required Properties: - "mediatek,mt7629-pericfg", "syscon" - "mediatek,mt8135-pericfg", "syscon" - "mediatek,mt8173-pericfg", "syscon" + - "mediatek,mt8183-pericfg", "syscon" - #clock-cells: Must be 1 - #reset-cells: Must be 1 diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt index a023b8338960f2..0293d693ce0caa 100644 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt +++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt @@ -8,6 +8,7 @@ Required Properties: - compatible: Should be one of: - "mediatek,mt2701-topckgen" - "mediatek,mt2712-topckgen", "syscon" + - "mediatek,mt6779-topckgen", "syscon" - "mediatek,mt6797-topckgen" - "mediatek,mt7622-topckgen" - "mediatek,mt7623-topckgen", "mediatek,mt2701-topckgen" diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt index 57176bb8dbb508..7894558b7a1c36 100644 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt +++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt @@ -8,6 +8,7 @@ Required Properties: - compatible: Should be one of: - "mediatek,mt2701-vdecsys", "syscon" - "mediatek,mt2712-vdecsys", "syscon" + - "mediatek,mt6779-vdecsys", "syscon" - "mediatek,mt6797-vdecsys", "syscon" - "mediatek,mt7623-vdecsys", "mediatek,mt2701-vdecsys", "syscon" - "mediatek,mt8173-vdecsys", "syscon" diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt index c9faa626908769..6a6a14e15cd7ff 100644 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt +++ 
b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt @@ -7,6 +7,7 @@ Required Properties: - compatible: Should be one of: - "mediatek,mt2712-vencsys", "syscon" + - "mediatek,mt6779-vencsys", "syscon" - "mediatek,mt6797-vencsys", "syscon" - "mediatek,mt8173-vencsys", "syscon" - "mediatek,mt8183-vencsys", "syscon" diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml index fa4d143a73de2e..64938fdaea5549 100644 --- a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml +++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml @@ -31,6 +31,7 @@ properties: - allwinner,sun8i-h3-ccu - allwinner,sun8i-h3-r-ccu - allwinner,sun8i-r40-ccu + - allwinner,sun8i-v3-ccu - allwinner,sun8i-v3s-ccu - allwinner,sun9i-a80-ccu - allwinner,sun50i-a64-ccu diff --git a/Documentation/devicetree/bindings/clock/brcm,bcm2835-cprman.txt b/Documentation/devicetree/bindings/clock/brcm,bcm2835-cprman.txt index dd906db34b328a..9e0b03a6519b3e 100644 --- a/Documentation/devicetree/bindings/clock/brcm,bcm2835-cprman.txt +++ b/Documentation/devicetree/bindings/clock/brcm,bcm2835-cprman.txt @@ -12,7 +12,9 @@ clock generators, but a few (like the ARM or HDMI) will source from the PLL dividers directly. Required properties: -- compatible: Should be "brcm,bcm2835-cprman" +- compatible: should be one of the following, + "brcm,bcm2711-cprman" + "brcm,bcm2835-cprman" - #clock-cells: Should be <1>. The permitted clock-specifier values can be found in include/dt-bindings/clock/bcm2835.h - reg: Specifies base physical address and size of the registers diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.txt b/Documentation/devicetree/bindings/clock/qcom,gcc.txt index 8661c3cd3ccf54..d14362ad413231 100644 --- a/Documentation/devicetree/bindings/clock/qcom,gcc.txt +++ b/Documentation/devicetree/bindings/clock/qcom,gcc.txt @@ -23,6 +23,7 @@ Required properties : "qcom,gcc-sdm630" "qcom,gcc-sdm660" "qcom,gcc-sdm845" + "qcom,gcc-sm8150" - reg : shall contain base register location and length - #clock-cells : shall contain 1 @@ -38,6 +39,13 @@ Documentation/devicetree/bindings/thermal/qcom-tsens.txt - protected-clocks : Protected clock specifier list as per common clock binding. +For SM8150 only: + - clocks: a list of phandles and clock-specifier pairs, + one for each entry in clock-names. + - clock-names: "bi_tcxo" (required) + "sleep_clk" (optional) + "aud_ref_clock" (optional) + Example: clock-controller@900000 { compatible = "qcom,gcc-msm8960"; @@ -71,3 +79,16 @@ Example of GCC with protected-clocks properties: , ; }; + +Example of GCC with clocks + gcc: clock-controller@100000 { + compatible = "qcom,gcc-sm8150"; + reg = <0x00100000 0x1f0000>; + #clock-cells = <1>; + #reset-cells = <1>; + #power-domain-cells = <1>; + clock-names = "bi_tcxo", + "sleep_clk"; + clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>, + <&sleep_clk>; + }; diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt b/Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt index 3c007653da3124..365bbde599b1f3 100644 --- a/Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt +++ b/Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt @@ -6,9 +6,14 @@ some Qualcomm Technologies Inc. SoCs. It accepts clock requests from other hardware subsystems via RSC to control clocks. 
Required properties : -- compatible : shall contain "qcom,sdm845-rpmh-clk" +- compatible : must be one of: + "qcom,sdm845-rpmh-clk" + "qcom,sm8150-rpmh-clk" - #clock-cells : must contain 1 +- clocks: a list of phandles and clock-specifier pairs, + one for each entry in clock-names. +- clock-names: Parent board clock: "xo". Example : diff --git a/Documentation/devicetree/bindings/clock/emev2-clock.txt b/Documentation/devicetree/bindings/clock/renesas,emev2-smu.txt similarity index 100% rename from Documentation/devicetree/bindings/clock/emev2-clock.txt rename to Documentation/devicetree/bindings/clock/renesas,emev2-smu.txt diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3308-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,rk3308-cru.txt new file mode 100644 index 00000000000000..9b151c5b0c905a --- /dev/null +++ b/Documentation/devicetree/bindings/clock/rockchip,rk3308-cru.txt @@ -0,0 +1,60 @@ +* Rockchip RK3308 Clock and Reset Unit + +The RK3308 clock controller generates and supplies clock to various +controllers within the SoC and also implements a reset controller for SoC +peripherals. + +Required Properties: + +- compatible: CRU should be "rockchip,rk3308-cru" +- reg: physical base address of the controller and length of memory mapped + region. +- #clock-cells: should be 1. +- #reset-cells: should be 1. + +Optional Properties: + +- rockchip,grf: phandle to the syscon managing the "general register files" + If missing, pll rates are not changeable, due to the missing pll lock status. + +Each clock is assigned an identifier and client nodes can use this identifier +to specify the clock which they consume. All available clocks are defined as +preprocessor macros in the dt-bindings/clock/rk3308-cru.h headers and can be +used in device tree sources. Similar macros exist for the reset sources in +these files. + +External clocks: + +There are several clocks that are generated outside the SoC. It is expected +that they are defined using standard clock bindings with following +clock-output-names: + - "xin24m" - crystal input - required, + - "xin32k" - rtc clock - optional, + - "mclk_i2s0_8ch_in", "mclk_i2s1_8ch_in", "mclk_i2s2_8ch_in", + "mclk_i2s3_8ch_in", "mclk_i2s0_2ch_in", + "mclk_i2s1_2ch_in" - external I2S or SPDIF clock - optional, + - "mac_clkin" - external MAC clock - optional + +Example: Clock controller node: + + cru: clock-controller@ff500000 { + compatible = "rockchip,rk3308-cru"; + reg = <0x0 0xff500000 0x0 0x1000>; + rockchip,grf = <&grf>; + #clock-cells = <1>; + #reset-cells = <1>; + }; + +Example: UART controller node that consumes the clock generated by the clock + controller: + + uart0: serial@ff0a0000 { + compatible = "rockchip,rk3308-uart", "snps,dw-apb-uart"; + reg = <0x0 0xff0a0000 0x0 0x100>; + interrupts = ; + clocks = <&cru SCLK_UART0>, <&cru PCLK_UART0>; + clock-names = "baudclk", "apb_pclk"; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; diff --git a/Documentation/devicetree/bindings/clock/ti,cdce925.txt b/Documentation/devicetree/bindings/clock/ti,cdce925.txt index 0d01f2d5cc36e8..26544c85202af9 100644 --- a/Documentation/devicetree/bindings/clock/ti,cdce925.txt +++ b/Documentation/devicetree/bindings/clock/ti,cdce925.txt @@ -24,6 +24,8 @@ Required properties: Optional properties: - xtal-load-pf: Crystal load-capacitor value to fine-tune performance on a board, or to compensate for external influences. +- vdd-supply: A regulator node for Vdd +- vddout-supply: A regulator node for Vddout For all PLL1, PLL2, ... 
an optional child node can be used to specify spread spectrum clocking parameters for a board. @@ -41,6 +43,8 @@ Example: clocks = <&xtal_27Mhz>; #clock-cells = <1>; xtal-load-pf = <5>; + vdd-supply = <&1v8-reg>; + vddout-supply = <&3v3-reg>; /* PLL options to get SSC 1% centered */ PLL2 { spread-spectrum = <4>; diff --git a/Documentation/kbuild/kbuild.rst b/Documentation/kbuild/kbuild.rst index 61b2181ed3ea8d..f1e5dce86af7cf 100644 --- a/Documentation/kbuild/kbuild.rst +++ b/Documentation/kbuild/kbuild.rst @@ -105,6 +105,15 @@ The output directory can also be specified using "O=...". Setting "O=..." takes precedence over KBUILD_OUTPUT. +KBUILD_EXTRA_WARN +----------------- +Specify the extra build checks. The same value can be assigned by passing +W=... from the command line. + +See `make help` for the list of the supported values. + +Setting "W=..." takes precedence over KBUILD_EXTRA_WARN. + KBUILD_DEBARCH -------------- For the deb-pkg target, allows overriding the normal heuristics deployed by @@ -241,11 +250,6 @@ To get all available archs you can also specify all. E.g.:: $ make ALLSOURCE_ARCHS=all tags -KBUILD_ENABLE_EXTRA_GCC_CHECKS ------------------------------- -If enabled over the make command line with "W=1", it turns on additional -gcc -W... options for more extensive build-time checking. - KBUILD_BUILD_TIMESTAMP ---------------------- Setting this to a date string overrides the timestamp used in the @@ -258,17 +262,3 @@ KBUILD_BUILD_USER, KBUILD_BUILD_HOST These two variables allow to override the user@host string displayed during boot and in /proc/version. The default value is the output of the commands whoami and host, respectively. - -KBUILD_LDS ----------- -The linker script with full path. Assigned by the top-level Makefile. - -KBUILD_VMLINUX_OBJS -------------------- -All object files for vmlinux. They are linked to vmlinux in the same -order as listed in KBUILD_VMLINUX_OBJS. - -KBUILD_VMLINUX_LIBS -------------------- -All .a "lib" files for vmlinux. KBUILD_VMLINUX_OBJS and KBUILD_VMLINUX_LIBS -together specify all the object files used to link vmlinux. diff --git a/Documentation/kbuild/makefiles.rst b/Documentation/kbuild/makefiles.rst index f4f0f7ffde2bf8..6ba9d5365ff3dd 100644 --- a/Documentation/kbuild/makefiles.rst +++ b/Documentation/kbuild/makefiles.rst @@ -471,21 +471,6 @@ more details, with real examples. The second argument is optional, and if supplied will be used if first argument is not supported. - cc-ldoption - cc-ldoption is used to check if $(CC) when used to link object files - supports the given option. An optional second option may be - specified if first option are not supported. - - Example:: - - #arch/x86/kernel/Makefile - vsyscall-flags += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) - - In the above example, vsyscall-flags will be assigned the option - -Wl$(comma)--hash-style=sysv if it is supported by $(CC). - The second argument is optional, and if supplied will be used - if first argument is not supported. - as-instr as-instr checks if the assembler reports a specific instruction and then outputs either option1 or option2 @@ -765,7 +750,8 @@ Files matching the patterns "*.[oas]", "*.ko", plus some additional files generated by kbuild are deleted all over the kernel src tree when "make clean" is executed. -Additional files can be specified in kbuild makefiles by use of $(clean-files). +Additional files or directories can be specified in kbuild makefiles by use of +$(clean-files). 
Example:: @@ -776,23 +762,8 @@ When executing "make clean", the file "crc32table.h" will be deleted. Kbuild will assume files to be in the same relative directory as the Makefile, except if prefixed with $(objtree). -To delete a directory hierarchy use: - - Example:: - - #scripts/package/Makefile - clean-dirs := $(objtree)/debian/ - -This will delete the directory debian in the toplevel directory, including all -subdirectories. - -To exclude certain files from make clean, use the $(no-clean-files) variable. -This is only a special case used in the top level Kbuild file: - - Example:: - - #Kbuild - no-clean-files := $(bounds-file) $(offsets-file) +To exclude certain files or directories from make clean, use the +$(no-clean-files) variable. Usually kbuild descends down in subdirectories due to "obj-* := dir/", but in the architecture makefiles where the kbuild infrastructure @@ -988,13 +959,25 @@ When kbuild executes, the following steps are followed (roughly): $(KBUILD_ARFLAGS) set by the top level Makefile to "D" (deterministic mode) if this option is supported by $(AR). - ARCH_CPPFLAGS, ARCH_AFLAGS, ARCH_CFLAGS Overrides the kbuild defaults + KBUILD_LDS + + The linker script with full path. Assigned by the top-level Makefile. + + KBUILD_LDS_MODULE + + The module linker script with full path. Assigned by the top-level + Makefile and additionally by the arch Makefile. + + KBUILD_VMLINUX_OBJS + + All object files for vmlinux. They are linked to vmlinux in the same + order as listed in KBUILD_VMLINUX_OBJS. - These variables are appended to the KBUILD_CPPFLAGS, - KBUILD_AFLAGS, and KBUILD_CFLAGS, respectively, after the - top-level Makefile has set any other flags. This provides a - means for an architecture to override the defaults. + KBUILD_VMLINUX_LIBS + All .a "lib" files for vmlinux. KBUILD_VMLINUX_OBJS and + KBUILD_VMLINUX_LIBS together specify all the object files used to + link vmlinux. 6.2 Add prerequisites to archheaders ------------------------------------ @@ -1139,7 +1122,7 @@ When kbuild executes, the following steps are followed (roughly): header-test-y - header-test-y specifies headers (*.h) in the current directory that + header-test-y specifies headers (`*.h`) in the current directory that should be compile tested to ensure they are self-contained, i.e. compilable as standalone units. If CONFIG_HEADER_TEST is enabled, this builds them as part of extra-y. @@ -1147,11 +1130,11 @@ When kbuild executes, the following steps are followed (roughly): header-test-pattern-y This works as a weaker version of header-test-y, and accepts wildcard - patterns. The typical usage is: + patterns. The typical usage is:: - header-test-pattern-y += *.h + header-test-pattern-y += *.h - This specifies all the files that matches to '*.h' in the current + This specifies all the files that matches to `*.h` in the current directory, but the files in 'header-test-' are excluded. 6.7 Commands useful for building a boot image diff --git a/Documentation/powerpc/elfnote.rst b/Documentation/powerpc/elfnote.rst new file mode 100644 index 00000000000000..06602248621cb1 --- /dev/null +++ b/Documentation/powerpc/elfnote.rst @@ -0,0 +1,41 @@ +========================== +ELF Note PowerPC Namespace +========================== + +The PowerPC namespace in an ELF Note of the kernel binary is used to store +capabilities and information which can be used by a bootloader or userland. + +Types and Descriptors +--------------------- + +The types to be used with the "PowerPC" namesapce are defined in [#f1]_. 
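To make the note layout above more concrete, here is a hedged bootloader-side sketch of locating a note in the "PowerPC" namespace; it relies only on the standard ELF note format (Elf64_Nhdr followed by a 4-byte aligned name and descriptor), and the type value to search for is assumed to come from the header referenced in [#f1]_ rather than being hard-coded.

.. code-block:: c

    /* Hedged sketch: scan a PT_NOTE segment (buf/len) of the kernel image
     * for a note in the "PowerPC" namespace.  The wanted type value is
     * supplied by the caller from the header referenced in [#f1]_. */
    #include <elf.h>
    #include <string.h>

    #define ALIGN4(x) (((unsigned long)(x) + 3) & ~3UL)

    static const void *find_powerpc_note(const void *buf, unsigned long len,
                                         unsigned int wanted_type,
                                         unsigned int *desc_size)
    {
            const unsigned char *p = buf, *end = p + len;

            while (p + sizeof(Elf64_Nhdr) <= end) {
                    const Elf64_Nhdr *n = (const Elf64_Nhdr *)p;
                    const char *name = (const char *)(n + 1);
                    const unsigned char *desc = (const unsigned char *)name +
                                                ALIGN4(n->n_namesz);

                    if (desc + ALIGN4(n->n_descsz) > end)
                            break;
                    if (n->n_type == wanted_type &&
                        n->n_namesz == sizeof("PowerPC") &&
                        !memcmp(name, "PowerPC", sizeof("PowerPC"))) {
                            *desc_size = n->n_descsz;
                            return desc;   /* start of the descriptor bitmap */
                    }
                    p = desc + ALIGN4(n->n_descsz);
            }
            return NULL;
    }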
+ + 1) PPC_ELFNOTE_CAPABILITIES + +Define the capabilities supported/required by the kernel. This type uses a +bitmap as "descriptor" field. Each bit is described below: + +- Ultravisor-capable bit (PowerNV only). + +.. code-block:: c + + #define PPCCAP_ULTRAVISOR_BIT (1 << 0) + +Indicate that the powerpc kernel binary knows how to run in an +ultravisor-enabled system. + +In an ultravisor-enabled system, some machine resources are now controlled +by the ultravisor. If the kernel is not ultravisor-capable, but it ends up +being run on a machine with ultravisor, the kernel will probably crash +trying to access ultravisor resources. For instance, it may crash in early +boot trying to set the partition table entry 0. + +In an ultravisor-enabled system, a bootloader could warn the user or prevent +the kernel from being run if the PowerPC ultravisor capability doesn't exist +or the Ultravisor-capable bit is not set. + +References +---------- + +.. [#f1] arch/powerpc/include/asm/elfnote.h + diff --git a/Documentation/powerpc/firmware-assisted-dump.rst b/Documentation/powerpc/firmware-assisted-dump.rst index 9ca12830a48e4d..0455a78486d5ec 100644 --- a/Documentation/powerpc/firmware-assisted-dump.rst +++ b/Documentation/powerpc/firmware-assisted-dump.rst @@ -9,18 +9,18 @@ a crashed system, and to do so from a fully-reset system, and to minimize the total elapsed time until the system is back in production use. -- Firmware assisted dump (fadump) infrastructure is intended to replace +- Firmware-Assisted Dump (FADump) infrastructure is intended to replace the existing phyp assisted dump. - Fadump uses the same firmware interfaces and memory reservation model as phyp assisted dump. -- Unlike phyp dump, fadump exports the memory dump through /proc/vmcore +- Unlike phyp dump, FADump exports the memory dump through /proc/vmcore in the ELF format in the same way as kdump. This helps us reuse the kdump infrastructure for dump capture and filtering. - Unlike phyp dump, userspace tool does not need to refer any sysfs interface while reading /proc/vmcore. -- Unlike phyp dump, fadump allows user to release all the memory reserved +- Unlike phyp dump, FADump allows user to release all the memory reserved for dump, with a single operation of echo 1 > /sys/kernel/fadump_release_mem. -- Once enabled through kernel boot parameter, fadump can be +- Once enabled through kernel boot parameter, FADump can be started/stopped through /sys/kernel/fadump_registered interface (see sysfs files section below) and can be easily integrated with kdump service start/stop init scripts. @@ -34,7 +34,7 @@ dump offers several strong, practical advantages: in a clean, consistent state. - Once the dump is copied out, the memory that held the dump is immediately available to the running kernel. And therefore, - unlike kdump, fadump doesn't need a 2nd reboot to get back + unlike kdump, FADump doesn't need a 2nd reboot to get back the system to the production configuration. The above can only be accomplished by coordination with, @@ -46,10 +46,9 @@ as follows: These registered sections of memory are reserved by the first kernel during early boot. -- When a system crashes, the Power firmware will save - the low memory (boot memory of size larger of 5% of system RAM - or 256MB) of RAM to the previous registered region. It will - also save system registers, and hardware PTE's. +- When system crashes, the Power firmware will copy the registered + low memory regions (boot memory) from source to destination area. 
+ It will also save hardware PTE's. NOTE: The term 'boot memory' means size of the low memory chunk @@ -61,9 +60,9 @@ as follows: the default calculated size. Use this option if default boot memory size is not sufficient for second kernel to boot successfully. For syntax of crashkernel= parameter, - refer to Documentation/admin-guide/kdump/kdump.rst. If any offset is - provided in crashkernel= parameter, it will be ignored - as fadump uses a predefined offset to reserve memory + refer to Documentation/admin-guide/kdump/kdump.rst. If any + offset is provided in crashkernel= parameter, it will be + ignored as FADump uses a predefined offset to reserve memory for boot memory dump preservation in case of a crash. - After the low memory (boot memory) area has been saved, the @@ -71,13 +70,15 @@ as follows: *not* clear the RAM. It will then launch the bootloader, as normal. -- The freshly booted kernel will notice that there is a new - node (ibm,dump-kernel) in the device tree, indicating that +- The freshly booted kernel will notice that there is a new node + (rtas/ibm,kernel-dump on pSeries or ibm,opal/dump/mpipl-boot + on OPAL platform) in the device tree, indicating that there is crash data available from a previous boot. During the early boot OS will reserve rest of the memory above boot memory size effectively booting with restricted memory - size. This will make sure that the second kernel will not - touch any of the dump memory area. + size. This will make sure that this kernel (also, referred + to as second kernel or capture kernel) will not touch any + of the dump memory area. - User-space tools will read /proc/vmcore to obtain the contents of memory, which holds the previous crashed kernel dump in ELF @@ -94,8 +95,30 @@ as follows: # echo 1 > /sys/kernel/fadump_release_mem Please note that the firmware-assisted dump feature -is only available on Power6 and above systems with recent -firmware versions. +is only available on POWER6 and above systems on pSeries +(PowerVM) platform and POWER9 and above systems with OP940 +or later firmware versions on PowerNV (OPAL) platform. +Note that, OPAL firmware exports ibm,opal/dump node when +FADump is supported on PowerNV platform. + +On OPAL based machines, system first boots into an intermittent +kernel (referred to as petitboot kernel) before booting into the +capture kernel. This kernel would have minimal kernel and/or +userspace support to process crash data. Such kernel needs to +preserve previously crash'ed kernel's memory for the subsequent +capture kernel boot to process this crash data. Kernel config +option CONFIG_PRESERVE_FA_DUMP has to be enabled on such kernel +to ensure that crash data is preserved to process later. + +-- On OPAL based machines (PowerNV), if the kernel is build with + CONFIG_OPAL_CORE=y, OPAL memory at the time of crash is also + exported as /sys/firmware/opal/core file. This procfs file is + helpful in debugging OPAL crashes with GDB. The kernel memory + used for exporting this procfs file can be released by echo'ing + '1' to /sys/kernel/fadump_release_opalcore node. + + e.g. + # echo 1 > /sys/kernel/fadump_release_opalcore Implementation details: ----------------------- @@ -110,72 +133,95 @@ that are run. If there is dump data, then the /sys/kernel/fadump_release_mem file is created, and the reserved memory is held. 
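The release step mentioned throughout this document is a plain sysfs write. As a rough illustration of what a dump-capture helper would do after saving /proc/vmcore (equivalent to the echo commands shown above, error handling kept minimal):

.. code-block:: c

    /* Hedged helper sketch: once the dump from /proc/vmcore is saved, hand
     * the reserved memory back to the running kernel.  Equivalent to
     * "echo 1 > /sys/kernel/fadump_release_mem" as documented above. */
    #include <fcntl.h>
    #include <unistd.h>

    static int fadump_release_mem(void)
    {
            int fd = open("/sys/kernel/fadump_release_mem", O_WRONLY);

            if (fd < 0)
                    return -1;      /* not running in a FADump capture kernel */
            if (write(fd, "1", 1) != 1) {
                    close(fd);
                    return -1;
            }
            return close(fd);
    }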
-If there is no waiting dump data, then only the memory required -to hold CPU state, HPTE region, boot memory dump and elfcore -header, is usually reserved at an offset greater than boot memory -size (see Fig. 1). This area is *not* released: this region will -be kept permanently reserved, so that it can act as a receptacle -for a copy of the boot memory content in addition to CPU state -and HPTE region, in the case a crash does occur. Since this reserved -memory area is used only after the system crash, there is no point in -blocking this significant chunk of memory from production kernel. -Hence, the implementation uses the Linux kernel's Contiguous Memory -Allocator (CMA) for memory reservation if CMA is configured for kernel. -With CMA reservation this memory will be available for applications to -use it, while kernel is prevented from using it. With this fadump will -still be able to capture all of the kernel memory and most of the user -space memory except the user pages that were present in CMA region:: +If there is no waiting dump data, then only the memory required to +hold CPU state, HPTE region, boot memory dump, FADump header and +elfcore header, is usually reserved at an offset greater than boot +memory size (see Fig. 1). This area is *not* released: this region +will be kept permanently reserved, so that it can act as a receptacle +for a copy of the boot memory content in addition to CPU state and +HPTE region, in the case a crash does occur. + +Since this reserved memory area is used only after the system crash, +there is no point in blocking this significant chunk of memory from +production kernel. Hence, the implementation uses the Linux kernel's +Contiguous Memory Allocator (CMA) for memory reservation if CMA is +configured for kernel. With CMA reservation this memory will be +available for applications to use it, while kernel is prevented from +using it. With this FADump will still be able to capture all of the +kernel memory and most of the user space memory except the user pages +that were present in CMA region:: o Memory Reservation during first kernel - Low memory Top of memory - 0 boot memory size | - | | |<--Reserved dump area -->| | - V V | Permanent Reservation | V - +-----------+----------/ /---+---+----+-----------+----+------+ - | | |CPU|HPTE| DUMP |ELF | | - +-----------+----------/ /---+---+----+-----------+----+------+ - | ^ - | | - \ / - ------------------------------------------- - Boot memory content gets transferred to - reserved area by firmware at the time of - crash + Low memory Top of memory + 0 boot memory size |<--- Reserved dump area --->| | + | | | Permanent Reservation | | + V V | | V + +-----------+-----/ /---+---+----+-------+-----+-----+----+--+ + | | |///|////| DUMP | HDR | ELF |////| | + +-----------+-----/ /---+---+----+-------+-----+-----+----+--+ + | ^ ^ ^ ^ ^ + | | | | | | + \ CPU HPTE / | | + ------------------------------ | | + Boot memory content gets transferred | | + to reserved area by firmware at the | | + time of crash. | | + FADump Header | + (meta area) | + | + | + Metadata: This area holds a metadata struture whose + address is registered with f/w and retrieved in the + second kernel after crash, on platforms that support + tags (OPAL). Having such structure with info needed + to process the crashdump eases dump capture process. + Fig. 
1 + o Memory Reservation during second kernel after crash - Low memory Top of memory - 0 boot memory size | - | |<------------- Reserved dump area ----------- -->| - V V V - +-----------+----------/ /---+---+----+-----------+----+------+ - | | |CPU|HPTE| DUMP |ELF | | - +-----------+----------/ /---+---+----+-----------+----+------+ - | | - V V - Used by second /proc/vmcore + Low memory Top of memory + 0 boot memory size | + | |<------------ Crash preserved area ------------>| + V V |<--- Reserved dump area --->| | + +-----------+-----/ /---+---+----+-------+-----+-----+----+--+ + | | |///|////| DUMP | HDR | ELF |////| | + +-----------+-----/ /---+---+----+-------+-----+-----+----+--+ + | | + V V + Used by second /proc/vmcore kernel to boot + + +---+ + |///| -> Regions (CPU, HPTE & Metadata) marked like this in the above + +---+ figures are not always present. For example, OPAL platform + does not have CPU & HPTE regions while Metadata region is + not supported on pSeries currently. + Fig. 2 -Currently the dump will be copied from /proc/vmcore to a -a new file upon user intervention. The dump data available through -/proc/vmcore will be in ELF format. Hence the existing kdump -infrastructure (kdump scripts) to save the dump works fine with -minor modifications. + +Currently the dump will be copied from /proc/vmcore to a new file upon +user intervention. The dump data available through /proc/vmcore will be +in ELF format. Hence the existing kdump infrastructure (kdump scripts) +to save the dump works fine with minor modifications. KDump scripts on +major Distro releases have already been modified to work seemlessly (no +user intervention in saving the dump) when FADump is used, instead of +KDump, as dump mechanism. The tools to examine the dump will be same as the ones used for kdump. -How to enable firmware-assisted dump (fadump): +How to enable firmware-assisted dump (FADump): ---------------------------------------------- 1. Set config option CONFIG_FA_DUMP=y and build kernel. 2. Boot into linux kernel with 'fadump=on' kernel cmdline option. - By default, fadump reserved memory will be initialized as CMA area. + By default, FADump reserved memory will be initialized as CMA area. Alternatively, user can boot linux kernel with 'fadump=nocma' to - prevent fadump to use CMA. + prevent FADump to use CMA. 3. Optionally, user can also set 'crashkernel=' kernel cmdline to specify size of the memory to reserve for boot memory dump preservation. @@ -201,29 +247,29 @@ the control files and debugfs file to display memory reserved region. Here is the list of files under kernel sysfs: /sys/kernel/fadump_enabled - This is used to display the fadump status. + This is used to display the FADump status. - - 0 = fadump is disabled - - 1 = fadump is enabled + - 0 = FADump is disabled + - 1 = FADump is enabled This interface can be used by kdump init scripts to identify if - fadump is enabled in the kernel and act accordingly. + FADump is enabled in the kernel and act accordingly. /sys/kernel/fadump_registered - This is used to display the fadump registration status as well - as to control (start/stop) the fadump registration. + This is used to display the FADump registration status as well + as to control (start/stop) the FADump registration. - - 0 = fadump is not registered. - - 1 = fadump is registered and ready to handle system crash. + - 0 = FADump is not registered. + - 1 = FADump is registered and ready to handle system crash. 
- To register fadump echo 1 > /sys/kernel/fadump_registered and + To register FADump echo 1 > /sys/kernel/fadump_registered and echo 0 > /sys/kernel/fadump_registered for un-register and stop the - fadump. Once the fadump is un-registered, the system crash will not + FADump. Once the FADump is un-registered, the system crash will not be handled and vmcore will not be captured. This interface can be easily integrated with kdump service start/stop. /sys/kernel/fadump_release_mem - This file is available only when fadump is active during + This file is available only when FADump is active during second kernel. This is used to release the reserved memory region that are held for saving crash dump. To release the reserved memory echo 1 to it:: @@ -237,25 +283,38 @@ Here is the list of files under kernel sysfs: enhanced to use this interface to release the memory reserved for dump and continue without 2nd reboot. + /sys/kernel/fadump_release_opalcore + + This file is available only on OPAL based machines when FADump is + active during capture kernel. This is used to release the memory + used by the kernel to export /sys/firmware/opal/core file. To + release this memory, echo '1' to it: + + echo 1 > /sys/kernel/fadump_release_opalcore + Here is the list of files under powerpc debugfs: (Assuming debugfs is mounted on /sys/kernel/debug directory.) /sys/kernel/debug/powerpc/fadump_region - This file shows the reserved memory regions if fadump is + This file shows the reserved memory regions if FADump is enabled otherwise this file is empty. The output format is:: : [-] bytes, Dumped: + and for kernel DUMP region is: + + DUMP: Src: , Dest: , Size: , Dumped: # bytes + e.g. - Contents when fadump is registered during first kernel:: + Contents when FADump is registered during first kernel:: # cat /sys/kernel/debug/powerpc/fadump_region CPU : [0x0000006ffb0000-0x0000006fff001f] 0x40020 bytes, Dumped: 0x0 HPTE: [0x0000006fff0020-0x0000006fff101f] 0x1000 bytes, Dumped: 0x0 DUMP: [0x0000006fff1020-0x0000007fff101f] 0x10000000 bytes, Dumped: 0x0 - Contents when fadump is active during second kernel:: + Contents when FADump is active during second kernel:: # cat /sys/kernel/debug/powerpc/fadump_region CPU : [0x0000006ffb0000-0x0000006fff001f] 0x40020 bytes, Dumped: 0x40020 @@ -263,6 +322,7 @@ Here is the list of files under powerpc debugfs: DUMP: [0x0000006fff1020-0x0000007fff101f] 0x10000000 bytes, Dumped: 0x10000000 : [0x00000010000000-0x0000006ffaffff] 0x5ffb0000 bytes, Dumped: 0x5ffb0000 + NOTE: Please refer to Documentation/filesystems/debugfs.txt on how to mount the debugfs filesystem. @@ -273,7 +333,7 @@ TODO: - Need to come up with the better approach to find out more accurate boot memory size that is required for a kernel to boot successfully when booted with restricted memory. - - The fadump implementation introduces a fadump crash info structure + - The FADump implementation introduces a FADump crash info structure in the scratch area before the ELF core header. 
The idea of introducing this structure is to pass some important crash info data to the second kernel which will help second kernel to populate ELF core header with diff --git a/Documentation/powerpc/index.rst b/Documentation/powerpc/index.rst index 549b1cdd77aeb7..db7b6a880f522b 100644 --- a/Documentation/powerpc/index.rst +++ b/Documentation/powerpc/index.rst @@ -15,6 +15,7 @@ powerpc dawr-power9 dscr eeh-pci-error-recovery + elfnote firmware-assisted-dump hvcs isa-versions @@ -25,6 +26,7 @@ powerpc qe_firmware syscall64-abi transactional_memory + ultravisor .. only:: subproject and html diff --git a/Documentation/powerpc/ultravisor.rst b/Documentation/powerpc/ultravisor.rst new file mode 100644 index 00000000000000..730854f73830a6 --- /dev/null +++ b/Documentation/powerpc/ultravisor.rst @@ -0,0 +1,1054 @@ +.. SPDX-License-Identifier: GPL-2.0 +.. _ultravisor: + +============================ +Protected Execution Facility +============================ + +.. contents:: + :depth: 3 + +Protected Execution Facility +############################ + + Protected Execution Facility (PEF) is an architectural change for + POWER 9 that enables Secure Virtual Machines (SVMs). DD2.3 chips + (PVR=0x004e1203) or greater will be PEF-capable. A new ISA release + will include the PEF RFC02487 changes. + + When enabled, PEF adds a new higher privileged mode, called Ultravisor + mode, to POWER architecture. Along with the new mode there is new + firmware called the Protected Execution Ultravisor (or Ultravisor + for short). Ultravisor mode is the highest privileged mode in POWER + architecture. + + +------------------+ + | Privilege States | + +==================+ + | Problem | + +------------------+ + | Supervisor | + +------------------+ + | Hypervisor | + +------------------+ + | Ultravisor | + +------------------+ + + PEF protects SVMs from the hypervisor, privileged users, and other + VMs in the system. SVMs are protected while at rest and can only be + executed by an authorized machine. All virtual machines utilize + hypervisor services. The Ultravisor filters calls between the SVMs + and the hypervisor to assure that information does not accidentally + leak. All hypercalls except H_RANDOM are reflected to the hypervisor. + H_RANDOM is not reflected to prevent the hypervisor from influencing + random values in the SVM. + + To support this there is a refactoring of the ownership of resources + in the CPU. Some of the resources which were previously hypervisor + privileged are now ultravisor privileged. + +Hardware +======== + + The hardware changes include the following: + + * There is a new bit in the MSR that determines whether the current + process is running in secure mode, MSR(S) bit 41. MSR(S)=1, process + is in secure mode, MSR(s)=0 process is in normal mode. + + * The MSR(S) bit can only be set by the Ultravisor. + + * HRFID cannot be used to set the MSR(S) bit. If the hypervisor needs + to return to a SVM it must use an ultracall. It can determine if + the VM it is returning to is secure. + + * There is a new Ultravisor privileged register, SMFCTRL, which has an + enable/disable bit SMFCTRL(E). + + * The privilege of a process is now determined by three MSR bits, + MSR(S, HV, PR). In each of the tables below the modes are listed + from least privilege to highest privilege. The higher privilege + modes can access all the resources of the lower privilege modes. 
+ + **Secure Mode MSR Settings** + + +---+---+---+---------------+ + | S | HV| PR|Privilege | + +===+===+===+===============+ + | 1 | 0 | 1 | Problem | + +---+---+---+---------------+ + | 1 | 0 | 0 | Privileged(OS)| + +---+---+---+---------------+ + | 1 | 1 | 0 | Ultravisor | + +---+---+---+---------------+ + | 1 | 1 | 1 | Reserved | + +---+---+---+---------------+ + + **Normal Mode MSR Settings** + + +---+---+---+---------------+ + | S | HV| PR|Privilege | + +===+===+===+===============+ + | 0 | 0 | 1 | Problem | + +---+---+---+---------------+ + | 0 | 0 | 0 | Privileged(OS)| + +---+---+---+---------------+ + | 0 | 1 | 0 | Hypervisor | + +---+---+---+---------------+ + | 0 | 1 | 1 | Problem (Host)| + +---+---+---+---------------+ + + * Memory is partitioned into secure and normal memory. Only processes + that are running in secure mode can access secure memory. + + * The hardware does not allow anything that is not running secure to + access secure memory. This means that the Hypervisor cannot access + the memory of the SVM without using an ultracall (asking the + Ultravisor). The Ultravisor will only allow the hypervisor to see + the SVM memory encrypted. + + * I/O systems are not allowed to directly address secure memory. This + limits the SVMs to virtual I/O only. + + * The architecture allows the SVM to share pages of memory with the + hypervisor that are not protected with encryption. However, this + sharing must be initiated by the SVM. + + * When a process is running in secure mode all hypercalls + (syscall lev=1) go to the Ultravisor. + + * When a process is in secure mode all interrupts go to the + Ultravisor. + + * The following resources have become Ultravisor privileged and + require an Ultravisor interface to manipulate: + + * Processor configurations registers (SCOMs). + + * Stop state information. + + * The debug registers CIABR, DAWR, and DAWRX when SMFCTRL(D) is set. + If SMFCTRL(D) is not set they do not work in secure mode. When set, + reading and writing requires an Ultravisor call, otherwise that + will cause a Hypervisor Emulation Assistance interrupt. + + * PTCR and partition table entries (partition table is in secure + memory). An attempt to write to PTCR will cause a Hypervisor + Emulation Assitance interrupt. + + * LDBAR (LD Base Address Register) and IMC (In-Memory Collection) + non-architected registers. An attempt to write to them will cause a + Hypervisor Emulation Assistance interrupt. + + * Paging for an SVM, sharing of memory with Hypervisor for an SVM. + (Including Virtual Processor Area (VPA) and virtual I/O). + + +Software/Microcode +================== + + The software changes include: + + * SVMs are created from normal VM using (open source) tooling supplied + by IBM. + + * All SVMs start as normal VMs and utilize an ultracall, UV_ESM + (Enter Secure Mode), to make the transition. + + * When the UV_ESM ultracall is made the Ultravisor copies the VM into + secure memory, decrypts the verification information, and checks the + integrity of the SVM. If the integrity check passes the Ultravisor + passes control in secure mode. + + * The verification information includes the pass phrase for the + encrypted disk associated with the SVM. This pass phrase is given + to the SVM when requested. + + * The Ultravisor is not involved in protecting the encrypted disk of + the SVM while at rest. + + * For external interrupts the Ultravisor saves the state of the SVM, + and reflects the interrupt to the hypervisor for processing. 
+ For hypercalls, the Ultravisor inserts neutral state into all + registers not needed for the hypercall then reflects the call to + the hypervisor for processing. The H_RANDOM hypercall is performed + by the Ultravisor and not reflected. + + * For virtual I/O to work bounce buffering must be done. + + * The Ultravisor uses AES (IAPM) for protection of SVM memory. IAPM + is a mode of AES that provides integrity and secrecy concurrently. + + * The movement of data between normal and secure pages is coordinated + with the Ultravisor by a new HMM plug-in in the Hypervisor. + + The Ultravisor offers new services to the hypervisor and SVMs. These + are accessed through ultracalls. + +Terminology +=========== + + * Hypercalls: special system calls used to request services from + Hypervisor. + + * Normal memory: Memory that is accessible to Hypervisor. + + * Normal page: Page backed by normal memory and available to + Hypervisor. + + * Shared page: A page backed by normal memory and available to both + the Hypervisor/QEMU and the SVM (i.e page has mappings in SVM and + Hypervisor/QEMU). + + * Secure memory: Memory that is accessible only to Ultravisor and + SVMs. + + * Secure page: Page backed by secure memory and only available to + Ultravisor and SVM. + + * SVM: Secure Virtual Machine. + + * Ultracalls: special system calls used to request services from + Ultravisor. + + +Ultravisor calls API +#################### + + This section describes Ultravisor calls (ultracalls) needed to + support Secure Virtual Machines (SVM)s and Paravirtualized KVM. The + ultracalls allow the SVMs and Hypervisor to request services from the + Ultravisor such as accessing a register or memory region that can only + be accessed when running in Ultravisor-privileged mode. + + The specific service needed from an ultracall is specified in register + R3 (the first parameter to the ultracall). Other parameters to the + ultracall, if any, are specified in registers R4 through R12. + + Return value of all ultracalls is in register R3. Other output values + from the ultracall, if any, are returned in registers R4 through R12. + The only exception to this register usage is the ``UV_RETURN`` + ultracall described below. + + Each ultracall returns specific error codes, applicable in the context + of the ultracall. However, like with the PowerPC Architecture Platform + Reference (PAPR), if no specific error code is defined for a + particular situation, then the ultracall will fallback to an erroneous + parameter-position based code. i.e U_PARAMETER, U_P2, U_P3 etc + depending on the ultracall parameter that may have caused the error. + + Some ultracalls involve transferring a page of data between Ultravisor + and Hypervisor. Secure pages that are transferred from secure memory + to normal memory may be encrypted using dynamically generated keys. + When the secure pages are transferred back to secure memory, they may + be decrypted using the same dynamically generated keys. Generation and + management of these keys will be covered in a separate document. + + For now this only covers ultracalls currently implemented and being + used by Hypervisor and SVMs but others can be added here when it + makes sense. + + The full specification for all hypercalls/ultracalls will eventually + be made available in the public/OpenPower version of the PAPR + specification. + + .. note:: + + If PEF is not enabled, the ultracalls will be redirected to the + Hypervisor which must handle/fail the calls. 
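Before the individual ultracalls are described, a hedged sketch of a caller-side wrapper may help fix the calling convention just given: the ultracall number is passed in R3, further parameters in R4 through R12, and the return code comes back in R3. The use of ``sc 2`` as the trap instruction and the clobber list below are assumptions of this illustration, not a statement of the actual kernel or firmware implementation. Each ultracall in the following sections would then be a call through such a wrapper with the corresponding opcode and parameters.

.. code-block:: c

    /* Illustrative wrapper only, following the register convention described
     * above: R3 carries the ultracall number and the return code, R4-R8
     * carry the arguments used here.  "sc 2" and the clobber set are
     * assumptions of this sketch. */
    static inline unsigned long ucall(unsigned long opcode, unsigned long a1,
                                      unsigned long a2, unsigned long a3,
                                      unsigned long a4, unsigned long a5)
    {
            register unsigned long r3 asm("r3") = opcode;
            register unsigned long r4 asm("r4") = a1;
            register unsigned long r5 asm("r5") = a2;
            register unsigned long r6 asm("r6") = a3;
            register unsigned long r7 asm("r7") = a4;
            register unsigned long r8 asm("r8") = a5;

            asm volatile("sc 2"
                         : "+r" (r3), "+r" (r4), "+r" (r5),
                           "+r" (r6), "+r" (r7), "+r" (r8)
                         :
                         : "r0", "r9", "r10", "r11", "r12",
                           "ctr", "xer", "cr0", "memory");
            return r3;
    }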
+ +Ultracalls used by Hypervisor +============================= + + This section describes the virtual memory management ultracalls used + by the Hypervisor to manage SVMs. + +UV_PAGE_OUT +----------- + + Encrypt and move the contents of a page from secure memory to normal + memory. + +Syntax +~~~~~~ + +.. code-block:: c + + uint64_t ultracall(const uint64_t UV_PAGE_OUT, + uint16_t lpid, /* LPAR ID */ + uint64_t dest_ra, /* real address of destination page */ + uint64_t src_gpa, /* source guest-physical-address */ + uint8_t flags, /* flags */ + uint64_t order) /* page size order */ + +Return values +~~~~~~~~~~~~~ + + One of the following values: + + * U_SUCCESS on success. + * U_PARAMETER if ``lpid`` is invalid. + * U_P2 if ``dest_ra`` is invalid. + * U_P3 if the ``src_gpa`` address is invalid. + * U_P4 if any bit in the ``flags`` is unrecognized + * U_P5 if the ``order`` parameter is unsupported. + * U_FUNCTION if functionality is not supported. + * U_BUSY if page cannot be currently paged-out. + +Description +~~~~~~~~~~~ + + Encrypt the contents of a secure-page and make it available to + Hypervisor in a normal page. + + By default, the source page is unmapped from the SVM's partition- + scoped page table. But the Hypervisor can provide a hint to the + Ultravisor to retain the page mapping by setting the ``UV_SNAPSHOT`` + flag in ``flags`` parameter. + + If the source page is already a shared page the call returns + U_SUCCESS, without doing anything. + +Use cases +~~~~~~~~~ + + #. QEMU attempts to access an address belonging to the SVM but the + page frame for that address is not mapped into QEMU's address + space. In this case, the Hypervisor will allocate a page frame, + map it into QEMU's address space and issue the ``UV_PAGE_OUT`` + call to retrieve the encrypted contents of the page. + + #. When Ultravisor runs low on secure memory and it needs to page-out + an LRU page. In this case, Ultravisor will issue the + ``H_SVM_PAGE_OUT`` hypercall to the Hypervisor. The Hypervisor will + then allocate a normal page and issue the ``UV_PAGE_OUT`` ultracall + and the Ultravisor will encrypt and move the contents of the secure + page into the normal page. + + #. When Hypervisor accesses SVM data, the Hypervisor requests the + Ultravisor to transfer the corresponding page into a insecure page, + which the Hypervisor can access. The data in the normal page will + be encrypted though. + +UV_PAGE_IN +---------- + + Move the contents of a page from normal memory to secure memory. + +Syntax +~~~~~~ + +.. code-block:: c + + uint64_t ultracall(const uint64_t UV_PAGE_IN, + uint16_t lpid, /* the LPAR ID */ + uint64_t src_ra, /* source real address of page */ + uint64_t dest_gpa, /* destination guest physical address */ + uint64_t flags, /* flags */ + uint64_t order) /* page size order */ + +Return values +~~~~~~~~~~~~~ + + One of the following values: + + * U_SUCCESS on success. + * U_BUSY if page cannot be currently paged-in. + * U_FUNCTION if functionality is not supported + * U_PARAMETER if ``lpid`` is invalid. + * U_P2 if ``src_ra`` is invalid. + * U_P3 if the ``dest_gpa`` address is invalid. + * U_P4 if any bit in the ``flags`` is unrecognized + * U_P5 if the ``order`` parameter is unsupported. + +Description +~~~~~~~~~~~ + + Move the contents of the page identified by ``src_ra`` from normal + memory to secure memory and map it to the guest physical address + ``dest_gpa``. + + If `dest_gpa` refers to a shared address, map the page into the + partition-scoped page-table of the SVM. 
If `dest_gpa` is not shared, + copy the contents of the page into the corresponding secure page. + Depending on the context, decrypt the page before being copied. + + The caller provides the attributes of the page through the ``flags`` + parameter. Valid values for ``flags`` are: + + * CACHE_INHIBITED + * CACHE_ENABLED + * WRITE_PROTECTION + + The Hypervisor must pin the page in memory before making + ``UV_PAGE_IN`` ultracall. + +Use cases +~~~~~~~~~ + + #. When a normal VM switches to secure mode, all its pages residing + in normal memory, are moved into secure memory. + + #. When an SVM requests to share a page with Hypervisor the Hypervisor + allocates a page and informs the Ultravisor. + + #. When an SVM accesses a secure page that has been paged-out, + Ultravisor invokes the Hypervisor to locate the page. After + locating the page, the Hypervisor uses UV_PAGE_IN to make the + page available to Ultravisor. + +UV_PAGE_INVAL +------------- + + Invalidate the Ultravisor mapping of a page. + +Syntax +~~~~~~ + +.. code-block:: c + + uint64_t ultracall(const uint64_t UV_PAGE_INVAL, + uint16_t lpid, /* the LPAR ID */ + uint64_t guest_pa, /* destination guest-physical-address */ + uint64_t order) /* page size order */ + +Return values +~~~~~~~~~~~~~ + + One of the following values: + + * U_SUCCESS on success. + * U_PARAMETER if ``lpid`` is invalid. + * U_P2 if ``guest_pa`` is invalid (or corresponds to a secure + page mapping). + * U_P3 if the ``order`` is invalid. + * U_FUNCTION if functionality is not supported. + * U_BUSY if page cannot be currently invalidated. + +Description +~~~~~~~~~~~ + + This ultracall informs Ultravisor that the page mapping in Hypervisor + corresponding to the given guest physical address has been invalidated + and that the Ultravisor should not access the page. If the specified + ``guest_pa`` corresponds to a secure page, Ultravisor will ignore the + attempt to invalidate the page and return U_P2. + +Use cases +~~~~~~~~~ + + #. When a shared page is unmapped from the QEMU's page table, possibly + because it is paged-out to disk, Ultravisor needs to know that the + page should not be accessed from its side too. + + +UV_WRITE_PATE +------------- + + Validate and write the partition table entry (PATE) for a given + partition. + +Syntax +~~~~~~ + +.. code-block:: c + + uint64_t ultracall(const uint64_t UV_WRITE_PATE, + uint32_t lpid, /* the LPAR ID */ + uint64_t dw0 /* the first double word to write */ + uint64_t dw1) /* the second double word to write */ + +Return values +~~~~~~~~~~~~~ + + One of the following values: + + * U_SUCCESS on success. + * U_BUSY if PATE cannot be currently written to. + * U_FUNCTION if functionality is not supported. + * U_PARAMETER if ``lpid`` is invalid. + * U_P2 if ``dw0`` is invalid. + * U_P3 if the ``dw1`` address is invalid. + * U_PERMISSION if the Hypervisor is attempting to change the PATE + of a secure virtual machine or if called from a + context other than Hypervisor. + +Description +~~~~~~~~~~~ + + Validate and write a LPID and its partition-table-entry for the given + LPID. If the LPID is already allocated and initialized, this call + results in changing the partition table entry. + +Use cases +~~~~~~~~~ + + #. The Partition table resides in Secure memory and its entries, + called PATE (Partition Table Entries), point to the partition- + scoped page tables for the Hypervisor as well as each of the + virtual machines (both secure and normal). 
The Hypervisor + operates in partition 0 and its partition-scoped page tables + reside in normal memory. + + #. This ultracall allows the Hypervisor to register the partition- + scoped and process-scoped page table entries for the Hypervisor + and other partitions (virtual machines) with the Ultravisor. + + #. If the value of the PATE for an existing partition (VM) changes, + the TLB cache for the partition is flushed. + + #. The Hypervisor is responsible for allocating LPID. The LPID and + its PATE entry are registered together. The Hypervisor manages + the PATE entries for a normal VM and can change the PATE entry + anytime. Ultravisor manages the PATE entries for an SVM and + Hypervisor is not allowed to modify them. + +UV_RETURN +--------- + + Return control from the Hypervisor back to the Ultravisor after + processing an hypercall or interrupt that was forwarded (aka + *reflected*) to the Hypervisor. + +Syntax +~~~~~~ + +.. code-block:: c + + uint64_t ultracall(const uint64_t UV_RETURN) + +Return values +~~~~~~~~~~~~~ + + This call never returns to Hypervisor on success. It returns + U_INVALID if ultracall is not made from a Hypervisor context. + +Description +~~~~~~~~~~~ + + When an SVM makes an hypercall or incurs some other exception, the + Ultravisor usually forwards (aka *reflects*) the exceptions to the + Hypervisor. After processing the exception, Hypervisor uses the + ``UV_RETURN`` ultracall to return control back to the SVM. + + The expected register state on entry to this ultracall is: + + * Non-volatile registers are restored to their original values. + * If returning from an hypercall, register R0 contains the return + value (**unlike other ultracalls**) and, registers R4 through R12 + contain any output values of the hypercall. + * R3 contains the ultracall number, i.e UV_RETURN. + * If returning with a synthesized interrupt, R2 contains the + synthesized interrupt number. + +Use cases +~~~~~~~~~ + + #. Ultravisor relies on the Hypervisor to provide several services to + the SVM such as processing hypercall and other exceptions. After + processing the exception, Hypervisor uses UV_RETURN to return + control back to the Ultravisor. + + #. Hypervisor has to use this ultracall to return control to the SVM. + + +UV_REGISTER_MEM_SLOT +-------------------- + + Register an SVM address-range with specified properties. + +Syntax +~~~~~~ + +.. code-block:: c + + uint64_t ultracall(const uint64_t UV_REGISTER_MEM_SLOT, + uint64_t lpid, /* LPAR ID of the SVM */ + uint64_t start_gpa, /* start guest physical address */ + uint64_t size, /* size of address range in bytes */ + uint64_t flags /* reserved for future expansion */ + uint16_t slotid) /* slot identifier */ + +Return values +~~~~~~~~~~~~~ + + One of the following values: + + * U_SUCCESS on success. + * U_PARAMETER if ``lpid`` is invalid. + * U_P2 if ``start_gpa`` is invalid. + * U_P3 if ``size`` is invalid. + * U_P4 if any bit in the ``flags`` is unrecognized. + * U_P5 if the ``slotid`` parameter is unsupported. + * U_PERMISSION if called from context other than Hypervisor. + * U_FUNCTION if functionality is not supported. + + +Description +~~~~~~~~~~~ + + Register a memory range for an SVM. The memory range starts at the + guest physical address ``start_gpa`` and is ``size`` bytes long. + +Use cases +~~~~~~~~~ + + + #. When a virtual machine goes secure, all the memory slots managed by + the Hypervisor move into secure memory. The Hypervisor iterates + through each of memory slots, and registers the slot with + Ultravisor. 
Hypervisor may discard some slots such as those used
+ for firmware (SLOF).
+
+ #. When new memory is hot-plugged, a new memory slot gets registered.
+
+
+UV_UNREGISTER_MEM_SLOT
+----------------------
+
+ Unregister an SVM address-range that was previously registered using
+ UV_REGISTER_MEM_SLOT.
+
+Syntax
+~~~~~~
+
+.. code-block:: c
+
+ uint64_t ultracall(const uint64_t UV_UNREGISTER_MEM_SLOT,
+ uint64_t lpid, /* LPAR ID of the SVM */
+ uint64_t slotid) /* reservation slotid */
+
+Return values
+~~~~~~~~~~~~~
+
+ One of the following values:
+
+ * U_SUCCESS on success.
+ * U_FUNCTION if functionality is not supported.
+ * U_PARAMETER if ``lpid`` is invalid.
+ * U_P2 if ``slotid`` is invalid.
+ * U_PERMISSION if called from context other than Hypervisor.
+
+Description
+~~~~~~~~~~~
+
+ Release the memory slot identified by ``slotid`` and free any
+ resources allocated towards the reservation.
+
+Use cases
+~~~~~~~~~
+
+ #. Memory hot-remove.
+
+
+UV_SVM_TERMINATE
+----------------
+
+ Terminate an SVM and release its resources.
+
+Syntax
+~~~~~~
+
+.. code-block:: c
+
+ uint64_t ultracall(const uint64_t UV_SVM_TERMINATE,
+ uint64_t lpid) /* LPAR ID of the SVM */
+
+Return values
+~~~~~~~~~~~~~
+
+ One of the following values:
+
+ * U_SUCCESS on success.
+ * U_FUNCTION if functionality is not supported.
+ * U_PARAMETER if ``lpid`` is invalid.
+ * U_INVALID if VM is not secure.
+ * U_PERMISSION if not called from a Hypervisor context.
+
+Description
+~~~~~~~~~~~
+
+ Terminate an SVM and release all its resources.
+
+Use cases
+~~~~~~~~~
+
+ #. Called by Hypervisor when terminating an SVM.
+
+
+Ultracalls used by SVM
+======================
+
+UV_SHARE_PAGE
+-------------
+
+ Share a set of guest physical pages with the Hypervisor.
+
+Syntax
+~~~~~~
+
+.. code-block:: c
+
+ uint64_t ultracall(const uint64_t UV_SHARE_PAGE,
+ uint64_t gfn, /* guest page frame number */
+ uint64_t num) /* number of pages of size PAGE_SIZE */
+
+Return values
+~~~~~~~~~~~~~
+
+ One of the following values:
+
+ * U_SUCCESS on success.
+ * U_FUNCTION if functionality is not supported.
+ * U_INVALID if the VM is not secure.
+ * U_PARAMETER if ``gfn`` is invalid.
+ * U_P2 if ``num`` is invalid.
+
+Description
+~~~~~~~~~~~
+
+ Share the ``num`` pages starting at guest physical frame number ``gfn``
+ with the Hypervisor. Assume page size is PAGE_SIZE bytes. Zero the
+ pages before returning.
+
+ If the address is already backed by a secure page, unmap the page and
+ back it with an insecure page, with the help of the Hypervisor. If it
+ is not backed by any page yet, mark the PTE as insecure and back it
+ with an insecure page when the address is accessed. If it is already
+ backed by an insecure page, zero the page and return.
+
+Use cases
+~~~~~~~~~
+
+ #. The Hypervisor cannot access the SVM pages since they are backed by
+ secure pages. Hence an SVM must explicitly request Ultravisor for
+ pages it can share with Hypervisor.
+
+ #. Shared pages are needed to support virtio and Virtual Processor Area
+ (VPA) in SVMs.
+
+
+UV_UNSHARE_PAGE
+---------------
+
+ Restore a shared SVM page to its initial state.
+
+Syntax
+~~~~~~
+
+.. code-block:: c
+
+ uint64_t ultracall(const uint64_t UV_UNSHARE_PAGE,
+ uint64_t gfn, /* guest page frame number */
+ uint64_t num) /* number of pages of size PAGE_SIZE */
+
+Return values
+~~~~~~~~~~~~~
+
+ One of the following values:
+
+ * U_SUCCESS on success.
+ * U_FUNCTION if functionality is not supported.
+ * U_INVALID if VM is not secure.
+ * U_PARAMETER if ``gfn`` is invalid.
+ * U_P2 if ``num`` is invalid.
+
+Description
+~~~~~~~~~~~
+
+ Stop sharing ``num`` pages starting at ``gfn`` with the Hypervisor.
+ Assume that the page size is PAGE_SIZE. Zero the pages before
+ returning.
+
+ If the address is already backed by an insecure page, unmap the page
+ and back it with a secure page. Inform the Hypervisor to release its
+ reference to the shared page. If the address is not backed by a page
+ yet, mark the PTE as secure and back it with a secure page when that
+ address is accessed. If it is already backed by a secure page, zero
+ the page and return.
+
+Use cases
+~~~~~~~~~
+
+ #. The SVM may decide to unshare a page from the Hypervisor.
+
+
+UV_UNSHARE_ALL_PAGES
+--------------------
+
+ Unshare all pages the SVM has shared with Hypervisor.
+
+Syntax
+~~~~~~
+
+.. code-block:: c
+
+ uint64_t ultracall(const uint64_t UV_UNSHARE_ALL_PAGES)
+
+Return values
+~~~~~~~~~~~~~
+
+ One of the following values:
+
+ * U_SUCCESS on success.
+ * U_FUNCTION if functionality is not supported.
+ * U_INVALID if VM is not secure.
+
+Description
+~~~~~~~~~~~
+
+ Unshare all shared pages from the Hypervisor. All unshared pages are
+ zeroed on return. Only pages explicitly shared by the SVM with the
+ Hypervisor (using UV_SHARE_PAGE ultracall) are unshared. Ultravisor
+ may internally share some pages with the Hypervisor without explicit
+ request from the SVM. These pages will not be unshared by this
+ ultracall.
+
+Use cases
+~~~~~~~~~
+
+ #. This call is needed when ``kexec`` is used to boot a different
+ kernel. It may also be needed during SVM reset.
+
+UV_ESM
+------
+
+ Secure the virtual machine (*enter secure mode*).
+
+Syntax
+~~~~~~
+
+.. code-block:: c
+
+ uint64_t ultracall(const uint64_t UV_ESM,
+ uint64_t esm_blob_addr, /* location of the ESM blob */
+ uint64_t fdt) /* Flattened device tree */
+
+Return values
+~~~~~~~~~~~~~
+
+ One of the following values:
+
+ * U_SUCCESS on success (including if VM is already secure).
+ * U_FUNCTION if functionality is not supported.
+ * U_INVALID if VM is not secure.
+ * U_PARAMETER if ``esm_blob_addr`` is invalid.
+ * U_P2 if ``fdt`` is invalid.
+ * U_PERMISSION if any integrity checks fail.
+ * U_RETRY if there is insufficient memory to create the SVM.
+ * U_NO_KEY if the symmetric key is unavailable.
+
+Description
+~~~~~~~~~~~
+
+ Secure the virtual machine. On successful completion, return
+ control to the virtual machine at the address specified in the
+ ESM blob.
+
+Use cases
+~~~~~~~~~
+
+ #. A normal virtual machine can choose to switch to a secure mode.
+
+Hypervisor Calls API
+####################
+
+ This document describes the Hypervisor calls (hypercalls) that are
+ needed to support the Ultravisor. Hypercalls are services provided by
+ the Hypervisor to virtual machines and Ultravisor.
+
+ Register usage for these hypercalls is identical to that of the other
+ hypercalls defined in the Power Architecture Platform Reference (PAPR)
+ document, i.e., on input, register R3 identifies the specific service
+ that is being requested and registers R4 through R11 contain
+ additional parameters to the hypercall, if any. On output, register
+ R3 contains the return value and registers R4 through R9 contain any
+ other output values from the hypercall.
+
+ This document only covers hypercalls currently implemented/planned
+ for Ultravisor usage but others can be added here when it makes sense.
+
+ The full specification for all hypercalls/ultracalls will eventually
+ be made available in the public/OpenPower version of the PAPR
+ specification.
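+
+ Ultracalls follow an analogous register convention (described earlier).
+ As an example, an SVM kernel could wrap the page-sharing ultracalls
+ from the previous section along the lines of the sketch below, for
+ instance before handing buffers to virtio. The sketch is illustrative
+ only: the helper names and error mapping are assumptions, the numeric
+ values of ``UV_SHARE_PAGE``, ``UV_UNSHARE_ALL_PAGES`` and ``U_SUCCESS``
+ come from an ABI header that this document does not define, and
+ ``ucall2()`` is the wrapper sketched earlier.
+
+ .. code-block:: c
+
+    /* Illustrative only: SVM-side wrappers for the sharing ultracalls. */
+    static inline int uv_share_page(unsigned long gfn, unsigned long npages)
+    {
+            /* R3 = UV_SHARE_PAGE, R4 = first guest frame, R5 = page count */
+            return ucall2(UV_SHARE_PAGE, gfn, npages) == U_SUCCESS ? 0 : -1;
+    }
+
+    static inline int uv_unshare_all_pages(void)
+    {
+            /* Called before kexec so no shared mappings are left behind. */
+            return ucall2(UV_UNSHARE_ALL_PAGES, 0, 0) == U_SUCCESS ? 0 : -1;
+    }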
+ +Hypervisor calls to support Ultravisor +====================================== + + Following are the set of hypercalls needed to support Ultravisor. + +H_SVM_INIT_START +---------------- + + Begin the process of converting a normal virtual machine into an SVM. + +Syntax +~~~~~~ + +.. code-block:: c + + uint64_t hypercall(const uint64_t H_SVM_INIT_START) + +Return values +~~~~~~~~~~~~~ + + One of the following values: + + * H_SUCCESS on success. + +Description +~~~~~~~~~~~ + + Initiate the process of securing a virtual machine. This involves + coordinating with the Ultravisor, using ultracalls, to allocate + resources in the Ultravisor for the new SVM, transferring the VM's + pages from normal to secure memory etc. When the process is + completed, Ultravisor issues the H_SVM_INIT_DONE hypercall. + +Use cases +~~~~~~~~~ + + #. Ultravisor uses this hypercall to inform Hypervisor that a VM + has initiated the process of switching to secure mode. + + +H_SVM_INIT_DONE +--------------- + + Complete the process of securing an SVM. + +Syntax +~~~~~~ + +.. code-block:: c + + uint64_t hypercall(const uint64_t H_SVM_INIT_DONE) + +Return values +~~~~~~~~~~~~~ + + One of the following values: + + * H_SUCCESS on success. + * H_UNSUPPORTED if called from the wrong context (e.g. + from an SVM or before an H_SVM_INIT_START + hypercall). + +Description +~~~~~~~~~~~ + + Complete the process of securing a virtual machine. This call must + be made after a prior call to ``H_SVM_INIT_START`` hypercall. + +Use cases +~~~~~~~~~ + + On successfully securing a virtual machine, the Ultravisor informs + Hypervisor about it. Hypervisor can use this call to finish setting + up its internal state for this virtual machine. + + +H_SVM_PAGE_IN +------------- + + Move the contents of a page from normal memory to secure memory. + +Syntax +~~~~~~ + +.. code-block:: c + + uint64_t hypercall(const uint64_t H_SVM_PAGE_IN, + uint64_t guest_pa, /* guest-physical-address */ + uint64_t flags, /* flags */ + uint64_t order) /* page size order */ + +Return values +~~~~~~~~~~~~~ + + One of the following values: + + * H_SUCCESS on success. + * H_PARAMETER if ``guest_pa`` is invalid. + * H_P2 if ``flags`` is invalid. + * H_P3 if ``order`` of page is invalid. + +Description +~~~~~~~~~~~ + + Retrieve the content of the page, belonging to the VM at the specified + guest physical address. + + Only valid value(s) in ``flags`` are: + + * H_PAGE_IN_SHARED which indicates that the page is to be shared + with the Ultravisor. + + * H_PAGE_IN_NONSHARED indicates that the UV is not anymore + interested in the page. Applicable if the page is a shared page. + + The ``order`` parameter must correspond to the configured page size. + +Use cases +~~~~~~~~~ + + #. When a normal VM becomes a secure VM (using the UV_ESM ultracall), + the Ultravisor uses this hypercall to move contents of each page of + the VM from normal memory to secure memory. + + #. Ultravisor uses this hypercall to ask Hypervisor to provide a page + in normal memory that can be shared between the SVM and Hypervisor. + + #. Ultravisor uses this hypercall to page-in a paged-out page. This + can happen when the SVM touches a paged-out page. + + #. If SVM wants to disable sharing of pages with Hypervisor, it can + inform Ultravisor to do so. Ultravisor will then use this hypercall + and inform Hypervisor that it has released access to the normal + page. + +H_SVM_PAGE_OUT +--------------- + + Move the contents of the page to normal memory. + +Syntax +~~~~~~ + +.. 
code-block:: c + + uint64_t hypercall(const uint64_t H_SVM_PAGE_OUT, + uint64_t guest_pa, /* guest-physical-address */ + uint64_t flags, /* flags (currently none) */ + uint64_t order) /* page size order */ + +Return values +~~~~~~~~~~~~~ + + One of the following values: + + * H_SUCCESS on success. + * H_PARAMETER if ``guest_pa`` is invalid. + * H_P2 if ``flags`` is invalid. + * H_P3 if ``order`` is invalid. + +Description +~~~~~~~~~~~ + + Move the contents of the page identified by ``guest_pa`` to normal + memory. + + Currently ``flags`` is unused and must be set to 0. The ``order`` + parameter must correspond to the configured page size. + +Use cases +~~~~~~~~~ + + #. If Ultravisor is running low on secure pages, it can move the + contents of some secure pages, into normal pages using this + hypercall. The content will be encrypted. + +References +########## + +- `Supporting Protected Computing on IBM Power Architecture `_ diff --git a/Documentation/trace/kprobetrace.rst b/Documentation/trace/kprobetrace.rst index fbb314bfa11270..55993055902c62 100644 --- a/Documentation/trace/kprobetrace.rst +++ b/Documentation/trace/kprobetrace.rst @@ -52,6 +52,7 @@ Synopsis of kprobe_events $retval : Fetch return value.(\*2) $comm : Fetch current task comm. +|-[u]OFFS(FETCHARG) : Fetch memory at FETCHARG +|- OFFS address.(\*3)(\*4) + \IMM : Store an immediate value to the argument. NAME=FETCHARG : Set NAME as the argument name of FETCHARG. FETCHARG:TYPE : Set TYPE as the type of FETCHARG. Currently, basic types (u8/u16/u32/u64/s8/s16/s32/s64), hexadecimal types diff --git a/Documentation/trace/uprobetracer.rst b/Documentation/trace/uprobetracer.rst index 6e75a6c5a2c86f..98cde99939d73f 100644 --- a/Documentation/trace/uprobetracer.rst +++ b/Documentation/trace/uprobetracer.rst @@ -45,6 +45,7 @@ Synopsis of uprobe_tracer $retval : Fetch return value.(\*1) $comm : Fetch current task comm. +|-[u]OFFS(FETCHARG) : Fetch memory at FETCHARG +|- OFFS address.(\*2)(\*3) + \IMM : Store an immediate value to the argument. NAME=FETCHARG : Set NAME as the argument name of FETCHARG. FETCHARG:TYPE : Set TYPE as the type of FETCHARG. 
Currently, basic types (u8/u16/u32/u64/s8/s16/s32/s64), hexadecimal types diff --git a/Kbuild b/Kbuild index 8637fd14135f2c..3109ac786e7625 100644 --- a/Kbuild +++ b/Kbuild @@ -18,8 +18,6 @@ $(bounds-file): kernel/bounds.s FORCE timeconst-file := include/generated/timeconst.h -targets += $(timeconst-file) - filechk_gentimeconst = echo $(CONFIG_HZ) | bc -q $< $(timeconst-file): kernel/time/timeconst.bc FORCE @@ -42,7 +40,6 @@ $(offsets-file): arch/$(SRCARCH)/kernel/asm-offsets.s FORCE # Check for missing system calls always += missing-syscalls -targets += missing-syscalls quiet_cmd_syscalls = CALL $< cmd_syscalls = $(CONFIG_SHELL) $< $(CC) $(c_flags) $(missing_syscalls_flags) @@ -54,13 +51,9 @@ missing-syscalls: scripts/checksyscalls.sh $(offsets-file) FORCE # Check atomic headers are up-to-date always += old-atomics -targets += old-atomics quiet_cmd_atomics = CALL $< cmd_atomics = $(CONFIG_SHELL) $< old-atomics: scripts/atomic/check-atomics.sh FORCE $(call cmd,atomics) - -# Keep these three files during make clean -no-clean-files := $(bounds-file) $(offsets-file) $(timeconst-file) diff --git a/MAINTAINERS b/MAINTAINERS index c740cf3f93efc3..c590a37e87a1f7 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11867,6 +11867,7 @@ S: Maintained F: arch/arm/mach-omap2/ F: arch/arm/plat-omap/ F: arch/arm/configs/omap2plus_defconfig +F: drivers/bus/ti-sysc.c F: drivers/i2c/busses/i2c-omap.c F: drivers/irqchip/irq-omap-intc.c F: drivers/mfd/*omap*.c @@ -11887,6 +11888,7 @@ F: drivers/regulator/tps65910-regulator.c F: drivers/regulator/twl-regulator.c F: drivers/regulator/twl6030-regulator.c F: include/linux/platform_data/i2c-omap.h +F: include/linux/platform_data/ti-sysc.h ONION OMEGA2+ BOARD M: Harvey Hunt @@ -13451,9 +13453,8 @@ S: Supported F: drivers/i2c/busses/i2c-qcom-geni.c QUALCOMM HEXAGON ARCHITECTURE -M: Richard Kuo +M: Brian Cain L: linux-hexagon@vger.kernel.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/rkuo/linux-hexagon-kernel.git S: Supported F: arch/hexagon/ diff --git a/Makefile b/Makefile index 2fa7cfa6fca391..656a8c95789df7 100644 --- a/Makefile +++ b/Makefile @@ -230,6 +230,8 @@ endif export KBUILD_CHECKSRC KBUILD_EXTMOD +extmod-prefix = $(if $(KBUILD_EXTMOD),$(KBUILD_EXTMOD)/) + ifeq ($(abs_srctree),$(abs_objtree)) # building in the source tree srctree := . 
@@ -271,52 +273,62 @@ no-dot-config-targets := $(clean-targets) \ %asm-generic kernelversion %src-pkg no-sync-config-targets := $(no-dot-config-targets) install %install \ kernelrelease +single-targets := %.a %.i %.ko %.lds %.ll %.lst %.mod %.o %.s %.symtypes %/ -config-targets := 0 -mixed-targets := 0 -dot-config := 1 -may-sync-config := 1 +config-build := +mixed-build := +need-config := 1 +may-sync-config := 1 +single-build := ifneq ($(filter $(no-dot-config-targets), $(MAKECMDGOALS)),) ifeq ($(filter-out $(no-dot-config-targets), $(MAKECMDGOALS)),) - dot-config := 0 + need-config := endif endif ifneq ($(filter $(no-sync-config-targets), $(MAKECMDGOALS)),) ifeq ($(filter-out $(no-sync-config-targets), $(MAKECMDGOALS)),) - may-sync-config := 0 + may-sync-config := endif endif ifneq ($(KBUILD_EXTMOD),) - may-sync-config := 0 + may-sync-config := endif ifeq ($(KBUILD_EXTMOD),) ifneq ($(filter config %config,$(MAKECMDGOALS)),) - config-targets := 1 + config-build := 1 ifneq ($(words $(MAKECMDGOALS)),1) - mixed-targets := 1 + mixed-build := 1 endif endif endif +# We cannot build single targets and the others at the same time +ifneq ($(filter $(single-targets), $(MAKECMDGOALS)),) + single-build := 1 + ifneq ($(filter-out $(single-targets), $(MAKECMDGOALS)),) + mixed-build := 1 + endif +endif + # For "make -j clean all", "make -j mrproper defconfig all", etc. ifneq ($(filter $(clean-targets),$(MAKECMDGOALS)),) ifneq ($(filter-out $(clean-targets),$(MAKECMDGOALS)),) - mixed-targets := 1 + mixed-build := 1 endif endif # install and modules_install need also be processed one by one ifneq ($(filter install,$(MAKECMDGOALS)),) ifneq ($(filter modules_install,$(MAKECMDGOALS)),) - mixed-targets := 1 + mixed-build := 1 endif endif -ifeq ($(mixed-targets),1) +ifdef mixed-build # =========================================================================== # We're called with mixed targets (*config and build targets). # Handle them one by one. 
@@ -332,7 +344,7 @@ __build_one_by_one: $(MAKE) -f $(srctree)/Makefile $$i; \ done -else +else # !mixed-build include scripts/Kbuild.include @@ -392,9 +404,7 @@ KCONFIG_CONFIG ?= .config export KCONFIG_CONFIG # SHELL used by kbuild -CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \ - else if [ -x /bin/bash ]; then echo /bin/bash; \ - else echo sh; fi ; fi) +CONFIG_SHELL := sh HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null) HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null) @@ -431,6 +441,7 @@ PYTHON = python PYTHON2 = python2 PYTHON3 = python3 CHECK = sparse +BASH = bash CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \ -Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF) @@ -470,12 +481,13 @@ KBUILD_AFLAGS_KERNEL := KBUILD_CFLAGS_KERNEL := KBUILD_AFLAGS_MODULE := -DMODULE KBUILD_CFLAGS_MODULE := -DMODULE -KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds +KBUILD_LDFLAGS_MODULE := +export KBUILD_LDS_MODULE := $(srctree)/scripts/module-common.lds KBUILD_LDFLAGS := GCC_PLUGINS_CFLAGS := CLANG_FLAGS := -export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC +export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE PAHOLE LEX YACC AWK INSTALLKERNEL export PERL PYTHON PYTHON2 PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE @@ -506,6 +518,7 @@ scripts_basic: $(Q)rm -f .tmp_quiet_recordmcount PHONY += outputmakefile +# Before starting out-of-tree build, make sure the source tree is clean. # outputmakefile generates a Makefile in the output directory, if using a # separate output directory. This allows convenient use of make in the # output directory. @@ -513,6 +526,15 @@ PHONY += outputmakefile # ignore whole output directory outputmakefile: ifdef building_out_of_srctree + $(Q)if [ -f $(srctree)/.config -o \ + -d $(srctree)/include/config -o \ + -d $(srctree)/arch/$(SRCARCH)/include/generated ]; then \ + echo >&2 "***"; \ + echo >&2 "*** The source tree is not clean, please run 'make$(if $(findstring command line, $(origin ARCH)), ARCH=$(ARCH)) mrproper'"; \ + echo >&2 "*** in $(abs_srctree)";\ + echo >&2 "***"; \ + false; \ + fi $(Q)ln -fsn $(srctree) source $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile $(srctree) $(Q)test -e .gitignore || \ @@ -544,7 +566,7 @@ endif # and from include/config/auto.conf.cmd to detect the compiler upgrade. CC_VERSION_TEXT = $(shell $(CC) --version 2>/dev/null | head -n 1) -ifeq ($(config-targets),1) +ifdef config-build # =========================================================================== # *config targets only - make sure prerequisites are updated, and descend # in scripts/kconfig to make the *config target @@ -555,13 +577,13 @@ ifeq ($(config-targets),1) include arch/$(SRCARCH)/Makefile export KBUILD_DEFCONFIG KBUILD_KCONFIG CC_VERSION_TEXT -config: scripts_basic outputmakefile FORCE +config: outputmakefile scripts_basic FORCE $(Q)$(MAKE) $(build)=scripts/kconfig $@ -%config: scripts_basic outputmakefile FORCE +%config: outputmakefile scripts_basic FORCE $(Q)$(MAKE) $(build)=scripts/kconfig $@ -else +else #!config-build # =========================================================================== # Build targets only - this includes vmlinux, arch specific targets, clean # targets and others. In general all targets except *config targets. 
@@ -604,7 +626,7 @@ endif export KBUILD_MODULES KBUILD_BUILTIN -ifeq ($(dot-config),1) +ifdef need-config include include/config/auto.conf endif @@ -645,15 +667,10 @@ RETPOLINE_VDSO_CFLAGS := $(call cc-option,$(RETPOLINE_VDSO_CFLAGS_GCC),$(call cc export RETPOLINE_CFLAGS export RETPOLINE_VDSO_CFLAGS -# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default -# values of the respective KBUILD_* variables -ARCH_CPPFLAGS := -ARCH_AFLAGS := -ARCH_CFLAGS := include arch/$(SRCARCH)/Makefile -ifeq ($(dot-config),1) -ifeq ($(may-sync-config),1) +ifdef need-config +ifdef may-sync-config # Read in dependencies to all Kconfig* files, make sure to run syncconfig if # changes are detected. This should be included after arch/$(SRCARCH)/Makefile # because some architectures define CROSS_COMPILE there. @@ -676,7 +693,7 @@ $(KCONFIG_CONFIG): # The syncconfig should be executed only once to make all the targets. %/auto.conf %/auto.conf.cmd %/tristate.conf: $(KCONFIG_CONFIG) $(Q)$(MAKE) -f $(srctree)/Makefile syncconfig -else +else # !may-sync-config # External modules and some install targets need include/generated/autoconf.h # and include/config/auto.conf but do not care if they are up-to-date. # Use auto.conf to trigger the test @@ -692,7 +709,7 @@ include/config/auto.conf: /bin/false) endif # may-sync-config -endif # $(dot-config) +endif # need-config KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,) KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,) @@ -700,10 +717,12 @@ KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation) KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow) KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) -ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE -KBUILD_CFLAGS += -Os -else -KBUILD_CFLAGS += -O2 +ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE +KBUILD_CFLAGS += -O2 +else ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3 +KBUILD_CFLAGS += -O3 +else ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE +KBUILD_CFLAGS += -Os endif ifdef CONFIG_CC_DISABLE_WARN_MAYBE_UNINITIALIZED @@ -751,6 +770,11 @@ else # These warnings generated too much noise in a regular build. # Use make W=1 to enable them (see scripts/Makefile.extrawarn) KBUILD_CFLAGS += -Wno-unused-but-set-variable + +# Warn about unmarked fall-throughs in switch statement. +# Disabled for clang while comment to attribute conversion happens and +# https://github.com/ClangBuiltLinux/linux/issues/636 is discussed. +KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough,) endif KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable) @@ -845,9 +869,6 @@ NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include) # warn about C99 declaration after statement KBUILD_CFLAGS += -Wdeclaration-after-statement -# Warn about unmarked fall-throughs in switch statement. 
-KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough,) - # Variable Length Arrays (VLAs) should not be used anywhere in the kernel KBUILD_CFLAGS += -Wvla @@ -900,11 +921,10 @@ include scripts/Makefile.kasan include scripts/Makefile.extrawarn include scripts/Makefile.ubsan -# Add any arch overrides and user supplied CPPFLAGS, AFLAGS and CFLAGS as the -# last assignments -KBUILD_CPPFLAGS += $(ARCH_CPPFLAGS) $(KCPPFLAGS) -KBUILD_AFLAGS += $(ARCH_AFLAGS) $(KAFLAGS) -KBUILD_CFLAGS += $(ARCH_CFLAGS) $(KCFLAGS) +# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments +KBUILD_CPPFLAGS += $(KCPPFLAGS) +KBUILD_AFLAGS += $(KAFLAGS) +KBUILD_CFLAGS += $(KCFLAGS) KBUILD_LDFLAGS_MODULE += --build-id LDFLAGS_vmlinux += --build-id @@ -1007,7 +1027,7 @@ endif PHONY += prepare0 -export MODORDER := $(if $(KBUILD_EXTMOD),$(KBUILD_EXTMOD)/)modules.order +export MODORDER := $(extmod-prefix)modules.order ifeq ($(KBUILD_EXTMOD),) core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ @@ -1020,6 +1040,9 @@ vmlinux-alldirs := $(sort $(vmlinux-dirs) Documentation \ $(patsubst %/,%,$(filter %/, $(init-) $(core-) \ $(drivers-) $(net-) $(libs-) $(virt-)))) +build-dirs := $(vmlinux-dirs) +clean-dirs := $(vmlinux-alldirs) + init-y := $(patsubst %/, %/built-in.a, $(init-y)) core-y := $(patsubst %/, %/built-in.a, $(core-y)) drivers-y := $(patsubst %/, %/built-in.a, $(drivers-y)) @@ -1042,7 +1065,7 @@ vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_OBJS) $(KBUILD_VMLINUX_LIBS) # Recurse until adjust_autoksyms.sh is satisfied PHONY += autoksyms_recursive ifdef CONFIG_TRIM_UNUSED_KSYMS -autoksyms_recursive: $(vmlinux-deps) modules.order +autoksyms_recursive: descend modules.order $(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh \ "$(MAKE) -f $(srctree)/Makefile vmlinux" endif @@ -1074,17 +1097,7 @@ targets := vmlinux # The actual objects are generated when descending, # make sure no implicit rule kicks in -$(sort $(vmlinux-deps)): $(vmlinux-dirs) ; - -# Handle descending into subdirectories listed in $(vmlinux-dirs) -# Preset locale variables to speed up the build process. Limit locale -# tweaks to this spot to avoid wrong language settings when running -# make menuconfig etc. -# Error messages still appears in the original language - -PHONY += $(vmlinux-dirs) -$(vmlinux-dirs): prepare - $(Q)$(MAKE) $(build)=$@ need-builtin=1 need-modorder=1 +$(sort $(vmlinux-deps)): descend ; filechk_kernel.release = \ echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))" @@ -1106,24 +1119,9 @@ scripts: scripts_basic scripts_dtc # archprepare is used in arch Makefiles and when processed asm symlink, # version.h and scripts_basic is processed / created. 
-PHONY += prepare archprepare prepare3 +PHONY += prepare archprepare -# prepare3 is used to check if we are building in a separate output directory, -# and if so do: -# 1) Check that make has not been executed in the kernel src $(srctree) -prepare3: include/config/kernel.release -ifdef building_out_of_srctree - @$(kecho) ' Using $(srctree) as source for kernel' - $(Q)if [ -f $(srctree)/.config -o \ - -d $(srctree)/include/config -o \ - -d $(srctree)/arch/$(SRCARCH)/include/generated ]; then \ - echo >&2 " $(srctree) is not clean, please run 'make ARCH=$(ARCH) mrproper'"; \ - echo >&2 " in the '$(srctree)' directory.";\ - /bin/false; \ - fi; -endif - -archprepare: archheaders archscripts scripts prepare3 outputmakefile \ +archprepare: outputmakefile archheaders archscripts scripts include/config/kernel.release \ asm-generic $(version_h) $(autoksyms_h) include/generated/utsrelease.h prepare0: archprepare @@ -1248,7 +1246,7 @@ kselftest-merge: $(if $(wildcard $(objtree)/.config),, $(error No .config exists, config your kernel first!)) $(Q)find $(srctree)/tools/testing/selftests -name config | \ xargs $(srctree)/scripts/kconfig/merge_config.sh -m $(objtree)/.config - +$(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig + $(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig # --------------------------------------------------------------------------- # Devicetree files @@ -1259,11 +1257,11 @@ endif ifneq ($(dtstree),) -%.dtb: prepare3 scripts_dtc +%.dtb: include/config/kernel.release scripts_dtc $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@ PHONY += dtbs dtbs_install dt_binding_check -dtbs dtbs_check: prepare3 scripts_dtc +dtbs dtbs_check: include/config/kernel.release scripts_dtc $(Q)$(MAKE) $(build)=$(dtstree) dtbs_check: export CHECK_DTBS=1 @@ -1302,17 +1300,16 @@ all: modules PHONY += modules modules: $(if $(KBUILD_BUILTIN),vmlinux) modules.order modules.builtin - @$(kecho) ' Building modules, stage 2.'; $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost $(Q)$(CONFIG_SHELL) $(srctree)/scripts/modules-check.sh -modules.order: $(vmlinux-dirs) - $(Q)$(AWK) '!x[$$0]++' $(addsuffix /$@, $(vmlinux-dirs)) > $@ +modules.order: descend + $(Q)$(AWK) '!x[$$0]++' $(addsuffix /$@, $(build-dirs)) > $@ -modbuiltin-dirs := $(addprefix _modbuiltin_, $(vmlinux-dirs)) +modbuiltin-dirs := $(addprefix _modbuiltin_, $(build-dirs)) modules.builtin: $(modbuiltin-dirs) - $(Q)$(AWK) '!x[$$0]++' $(addsuffix /$@, $(vmlinux-dirs)) > $@ + $(Q)$(AWK) '!x[$$0]++' $(addsuffix /$@, $(build-dirs)) > $@ PHONY += $(modbuiltin-dirs) # tristate.conf is not included from this Makefile. Add it as a prerequisite @@ -1385,12 +1382,14 @@ CLEAN_FILES += modules.builtin.modinfo # Directories & files removed with 'make mrproper' MRPROPER_DIRS += include/config include/generated \ - arch/$(SRCARCH)/include/generated .tmp_objdiff + arch/$(SRCARCH)/include/generated .tmp_objdiff \ + debian/ snap/ tar-install/ MRPROPER_FILES += .config .config.old .version \ Module.symvers \ signing_key.pem signing_key.priv signing_key.x509 \ x509.genkey extra_certificates signing_key.x509.keyid \ - signing_key.x509.signer vmlinux-gdb.py + signing_key.x509.signer vmlinux-gdb.py \ + *.spec # Directories & files removed with 'make distclean' DISTCLEAN_DIRS += @@ -1400,11 +1399,8 @@ DISTCLEAN_FILES += tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS # clean: rm-dirs := $(CLEAN_DIRS) clean: rm-files := $(CLEAN_FILES) -clean-dirs := $(addprefix _clean_, . 
$(vmlinux-alldirs)) -PHONY += $(clean-dirs) clean archclean vmlinuxclean -$(clean-dirs): - $(Q)$(MAKE) $(clean)=$(patsubst _clean_%,%,$@) +PHONY += archclean vmlinuxclean vmlinuxclean: $(Q)$(CONFIG_SHELL) $(srctree)/scripts/link-vmlinux.sh clean @@ -1445,13 +1441,11 @@ distclean: mrproper # Packaging of the kernel to various formats # --------------------------------------------------------------------------- -package-dir := scripts/package %src-pkg: FORCE - $(Q)$(MAKE) $(build)=$(package-dir) $@ + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.package $@ %pkg: include/config/kernel.release FORCE - $(Q)$(MAKE) $(build)=$(package-dir) $@ - + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.package $@ # Brief documentation of the typical targets used # --------------------------------------------------------------------------- @@ -1525,7 +1519,7 @@ help: @echo ' or "cd tools; make help"' @echo '' @echo 'Kernel packaging:' - @$(MAKE) $(build)=$(package-dir) help + @$(MAKE) -f $(srctree)/scripts/Makefile.package help @echo '' @echo 'Documentation targets:' @$(MAKE) -f $(srctree)/Documentation/Makefile dochelp @@ -1550,7 +1544,7 @@ help: @echo ' make C=1 [targets] Check re-compiled c source with $$CHECK (sparse by default)' @echo ' make C=2 [targets] Force check of all c source with $$CHECK' @echo ' make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections' - @echo ' make W=n [targets] Enable extra gcc checks, n=1,2,3 where' + @echo ' make W=n [targets] Enable extra build checks, n=1,2,3 where' @echo ' 1: warnings which may be relevant and do not occur too often' @echo ' 2: warnings which occur quite often but may still be relevant' @echo ' 3: more obscure warnings, can most likely be ignored' @@ -1579,7 +1573,7 @@ $(help-board-dirs): help-%: DOC_TARGETS := xmldocs latexdocs pdfdocs htmldocs epubdocs cleandocs \ linkcheckdocs dochelp refcheckdocs PHONY += $(DOC_TARGETS) -$(DOC_TARGETS): scripts_basic FORCE +$(DOC_TARGETS): $(Q)$(MAKE) $(build)=Documentation $@ # Misc @@ -1624,13 +1618,9 @@ $(objtree)/Module.symvers: echo " is missing; modules will have no dependencies and modversions."; \ echo ) -module-dirs := $(addprefix _module_,$(KBUILD_EXTMOD)) -PHONY += $(module-dirs) modules -$(module-dirs): prepare $(objtree)/Module.symvers - $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@) need-modorder=1 - -modules: $(module-dirs) - @$(kecho) ' Building modules, stage 2.'; +build-dirs := $(KBUILD_EXTMOD) +PHONY += modules +modules: descend $(objtree)/Module.symvers $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost PHONY += modules_install @@ -1646,14 +1636,13 @@ PHONY += _emodinst_post _emodinst_post: _emodinst_ $(call cmd,depmod) -clean-dirs := $(addprefix _clean_,$(KBUILD_EXTMOD)) - -PHONY += $(clean-dirs) clean -$(clean-dirs): - $(Q)$(MAKE) $(clean)=$(patsubst _clean_%,%,$@) - +clean-dirs := $(KBUILD_EXTMOD) clean: rm-files := $(KBUILD_EXTMOD)/Module.symvers +PHONY += / +/: + @echo >&2 '"$(MAKE) /" is no longer supported. Please use "$(MAKE) ./" instead.' + PHONY += help help: @echo ' Building external modules.' @@ -1667,6 +1656,21 @@ help: PHONY += prepare endif # KBUILD_EXTMOD +# Handle descending into subdirectories listed in $(build-dirs) +# Preset locale variables to speed up the build process. Limit locale +# tweaks to this spot to avoid wrong language settings when running +# make menuconfig etc. 
+# Error messages still appears in the original language +PHONY += descend $(build-dirs) +descend: $(build-dirs) +$(build-dirs): prepare + $(Q)$(MAKE) $(build)=$@ single-build=$(single-build) need-builtin=1 need-modorder=1 + +clean-dirs := $(addprefix _clean_, $(clean-dirs)) +PHONY += $(clean-dirs) clean +$(clean-dirs): + $(Q)$(MAKE) $(clean)=$(patsubst _clean_%,%,$@) + clean: $(clean-dirs) $(call cmd,rmdirs) $(call cmd,rmfiles) @@ -1688,7 +1692,7 @@ clean: $(clean-dirs) # Generate tags for editors # --------------------------------------------------------------------------- quiet_cmd_tags = GEN $@ - cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@ + cmd_tags = $(BASH) $(srctree)/scripts/tags.sh $@ tags TAGS cscope gtags: FORCE $(call cmd,tags) @@ -1709,7 +1713,7 @@ versioncheck: | xargs $(PERL) -w $(srctree)/scripts/checkversion.pl coccicheck: - $(Q)$(CONFIG_SHELL) $(srctree)/scripts/$@ + $(Q)$(BASH) $(srctree)/scripts/$@ namespacecheck: $(PERL) $(srctree)/scripts/namespace.pl @@ -1757,45 +1761,47 @@ tools/%: FORCE # Single targets # --------------------------------------------------------------------------- -# Single targets are compatible with: -# - build with mixed source and output -# - build with separate output dir 'make O=...' -# - external modules +# To build individual files in subdirectories, you can do like this: +# +# make foo/bar/baz.s # -# target-dir => where to store outputfile -# build-dir => directory in kernel source tree to use - -build-target = $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD)/)$@ -build-dir = $(patsubst %/,%,$(dir $(build-target))) - -%.i: prepare FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(build-target) -%.ll: prepare FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(build-target) -%.lst: prepare FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(build-target) -%.o: prepare FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(build-target) -%.s: prepare FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(build-target) -%.symtypes: prepare FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(build-target) +# The supported suffixes for single-target are listed in 'single-targets' +# +# To build only under specific subdirectories, you can do like this: +# +# make foo/bar/baz/ + +ifdef single-build + +single-all := $(filter $(single-targets), $(MAKECMDGOALS)) + +# .ko is special because modpost is needed +single-ko := $(sort $(filter %.ko, $(single-all))) +single-no-ko := $(sort $(patsubst %.ko,%.mod, $(single-all))) + +$(single-ko): single_modpost + @: +$(single-no-ko): descend + @: + ifeq ($(KBUILD_EXTMOD),) -# For the single build of an in-tree module, use a temporary file to avoid +# For the single build of in-tree modules, use a temporary file to avoid # the situation of modules_install installing an invalid modules.order. 
-%.ko: MODORDER := .modules.tmp +MODORDER := .modules.tmp endif -%.ko: prepare FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(build-target:.ko=.mod) - $(Q)echo $(build-target) > $(MODORDER) + +PHONY += single_modpost +single_modpost: $(single-no-ko) + $(Q){ $(foreach m, $(single-ko), echo $(extmod-prefix)$m;) } > $(MODORDER) $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost -# Modules -PHONY += / -/: ./ +KBUILD_MODULES := 1 -%/: prepare FORCE - $(Q)$(MAKE) KBUILD_MODULES=1 $(build)=$(build-dir) need-modorder=1 +export KBUILD_SINGLE_TARGETS := $(addprefix $(extmod-prefix), $(single-no-ko)) + +single-build = $(if $(filter-out $@/, $(single-no-ko)),1) + +endif # FIXME Should go into a make.lib or something # =========================================================================== @@ -1816,9 +1822,9 @@ existing-targets := $(wildcard $(sort $(targets))) -include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd) -endif # ifeq ($(config-targets),1) -endif # ifeq ($(mixed-targets),1) -endif # need-sub-make +endif # config-targets +endif # mixed-build +endif # need-sub-make PHONY += FORCE FORCE: diff --git a/arch/Kconfig b/arch/Kconfig index c4b2afa138cada..0fcf8ec1e09883 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -292,6 +292,13 @@ config ARCH_32BIT_OFF_T still support 32-bit off_t. This option is enabled for all such architectures explicitly. +config HAVE_ASM_MODVERSIONS + bool + help + This symbol should be selected by an architecure if it provides + to support the module versioning for symbols + exported from assembly code. + config HAVE_REGS_AND_STACK_ACCESS_API bool help @@ -939,6 +946,9 @@ config RELR well as compatible NM and OBJCOPY utilities (llvm-nm and llvm-objcopy are compatible). +config ARCH_HAS_MEM_ENCRYPT + bool + source "kernel/gcov/Kconfig" source "scripts/gcc-plugins/Kconfig" diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index f7b19b813a7019..ef179033a7c214 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -11,6 +11,7 @@ config ALPHA select PCI_DOMAINS if PCI select PCI_SYSCALL if PCI select HAVE_AOUT + select HAVE_ASM_MODVERSIONS select HAVE_IDE select HAVE_OPROFILE select HAVE_PCSPKR_PLATFORM diff --git a/arch/arc/Makefile b/arch/arc/Makefile index ee6d1184c2b1b5..f1c44cccf8d6c4 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -48,14 +48,6 @@ endif cfi := $(call as-instr,.cfi_startproc\n.cfi_endproc,-DARC_DW2_UNWIND_AS_CFI) cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables $(cfi) -ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE -# Generic build system uses -O2, we want -O3 -# Note: No need to add to cflags-y as that happens anyways -# -# Disable the false maybe-uninitialized warings gcc spits out at -O3 -ARCH_CFLAGS += -O3 $(call cc-disable-warning,maybe-uninitialized,) -endif - # small data is default for elf32 tool-chain. 
If not usable, disable it # This also allows repurposing GP as scratch reg to gcc reg allocator disable_small_data := y diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig index e31a8ebc3eccb0..0016149f958304 100644 --- a/arch/arc/configs/axs101_defconfig +++ b/arch/arc/configs/axs101_defconfig @@ -9,6 +9,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig index e0e8567f0d7585..5b031582a1cf8b 100644 --- a/arch/arc/configs/axs103_defconfig +++ b/arch/arc/configs/axs103_defconfig @@ -9,6 +9,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig index fcbc952bc75bb9..d4eec39e0112c4 100644 --- a/arch/arc/configs/axs103_smp_defconfig +++ b/arch/arc/configs/axs103_smp_defconfig @@ -9,6 +9,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/haps_hs_defconfig b/arch/arc/configs/haps_hs_defconfig index 436f2135bdc168..47ff8a97e42da6 100644 --- a/arch/arc/configs/haps_hs_defconfig +++ b/arch/arc/configs/haps_hs_defconfig @@ -10,6 +10,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_EXPERT=y CONFIG_PERF_EVENTS=y # CONFIG_COMPAT_BRK is not set diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig index 33a787c375e2e5..9685fd5f57a482 100644 --- a/arch/arc/configs/haps_hs_smp_defconfig +++ b/arch/arc/configs/haps_hs_smp_defconfig @@ -10,6 +10,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig index 403125d9c9a344..9b9a74444ce274 100644 --- a/arch/arc/configs/hsdk_defconfig +++ b/arch/arc/configs/hsdk_defconfig @@ -9,6 +9,7 @@ CONFIG_NAMESPACES=y # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y CONFIG_BLK_DEV_RAM=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig index f0a077c00efa82..5978d4d7d5b079 100644 --- a/arch/arc/configs/nps_defconfig +++ b/arch/arc/configs/nps_defconfig @@ -6,6 +6,7 @@ CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_SYSCTL_SYSCALL=y # CONFIG_EPOLL is not set # CONFIG_SIGNALFD is not set diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig index de398c7b10b317..2b9b11474640c9 100644 --- a/arch/arc/configs/nsim_700_defconfig +++ b/arch/arc/configs/nsim_700_defconfig @@ -10,6 +10,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y 
+CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig index 2dbd34a9ff0772..bab3dd25584162 100644 --- a/arch/arc/configs/nsim_hs_defconfig +++ b/arch/arc/configs/nsim_hs_defconfig @@ -10,6 +10,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig index c7135f1e2583f0..90d2d50fb8dc5d 100644 --- a/arch/arc/configs/nsim_hs_smp_defconfig +++ b/arch/arc/configs/nsim_hs_smp_defconfig @@ -8,6 +8,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig index 385a71d3c47854..5dd470b6609ebf 100644 --- a/arch/arc/configs/nsimosci_defconfig +++ b/arch/arc/configs/nsimosci_defconfig @@ -10,6 +10,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig index 248a2c3bdc1298..3532e86f7bff69 100644 --- a/arch/arc/configs/nsimosci_hs_defconfig +++ b/arch/arc/configs/nsimosci_hs_defconfig @@ -10,6 +10,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig index 1a4bc7b660fb5d..d90448bee064f3 100644 --- a/arch/arc/configs/nsimosci_hs_smp_defconfig +++ b/arch/arc/configs/nsimosci_hs_smp_defconfig @@ -8,6 +8,7 @@ CONFIG_IKCONFIG_PROC=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_PERF_EVENTS=y # CONFIG_COMPAT_BRK is not set CONFIG_KPROBES=y diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig index dc739bd093e319..3a138f8c729945 100644 --- a/arch/arc/configs/tb10x_defconfig +++ b/arch/arc/configs/tb10x_defconfig @@ -14,6 +14,7 @@ CONFIG_INITRAMFS_SOURCE="../tb10x-rootfs.cpio" CONFIG_INITRAMFS_ROOT_UID=2100 CONFIG_INITRAMFS_ROOT_GID=501 # CONFIG_RD_GZIP is not set +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_SYSCTL_SYSCALL=y CONFIG_KALLSYMS_ALL=y # CONFIG_AIO is not set diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig index 0c3b214168197d..d7c858df520cc0 100644 --- a/arch/arc/configs/vdk_hs38_defconfig +++ b/arch/arc/configs/vdk_hs38_defconfig @@ -4,6 +4,7 @@ CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig index f9ad9d3ee702d9..015c1d43889e69 100644 --- a/arch/arc/configs/vdk_hs38_smp_defconfig +++ b/arch/arc/configs/vdk_hs38_smp_defconfig @@ -4,6 +4,7 @@ CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y 
CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arm/Makefile b/arch/arm/Makefile index f9002e44f18fb8..be2fc3e7943403 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -17,7 +17,7 @@ KBUILD_LDFLAGS_MODULE += --be8 endif ifeq ($(CONFIG_ARM_MODULE_PLTS),y) -KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/arm/kernel/module.lds +KBUILD_LDS_MODULE += $(srctree)/arch/arm/kernel/module.lds endif GZFLAGS :=-9 diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index a24a6a132b0796..b21b3a64641a76 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile @@ -336,7 +336,8 @@ dtb-$(CONFIG_MACH_MESON8) += \ dtb-$(CONFIG_ARCH_MMP) += \ pxa168-aspenite.dtb \ pxa910-dkb.dtb \ - mmp2-brownstone.dtb + mmp2-brownstone.dtb \ + mmp2-olpc-xo-1-75.dtb dtb-$(CONFIG_ARCH_MPS2) += \ mps2-an385.dtb \ mps2-an399.dtb @@ -1278,6 +1279,7 @@ dtb-$(CONFIG_ARCH_MILBEAUT) += milbeaut-m10v-evb.dtb dtb-$(CONFIG_ARCH_ZX) += zx296702-ad1.dtb dtb-$(CONFIG_ARCH_ASPEED) += \ aspeed-ast2500-evb.dtb \ + aspeed-ast2600-evb.dtb \ aspeed-bmc-arm-centriq2400-rep.dtb \ aspeed-bmc-arm-stardragon4800-rep2.dtb \ aspeed-bmc-facebook-cmm.dtb \ diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi index 46849d6ecb3e26..9915c891e05fa5 100644 --- a/arch/arm/boot/dts/am33xx-l4.dtsi +++ b/arch/arm/boot/dts/am33xx-l4.dtsi @@ -673,7 +673,6 @@ target-module@100000 { /* 0x4a100000, ap 3 08.0 */ compatible = "ti,sysc-omap4-simple", "ti,sysc"; - ti,hwmods = "cpgmac0"; reg = <0x101200 0x4>, <0x101208 0x4>, <0x101204 0x4>; @@ -719,9 +718,10 @@ davinci_mdio: mdio@1000 { compatible = "ti,cpsw-mdio","ti,davinci_mdio"; + clocks = <&cpsw_125mhz_clkctrl AM3_CPSW_125MHZ_CPGMAC0_CLKCTRL 0>; + clock-names = "fck"; #address-cells = <1>; #size-cells = <0>; - ti,hwmods = "davinci_mdio"; bus_freq = <1000000>; reg = <0x1000 0x100>; status = "disabled"; diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi index 23ea381d363fd1..bf3002009b002f 100644 --- a/arch/arm/boot/dts/am3517.dtsi +++ b/arch/arm/boot/dts/am3517.dtsi @@ -88,6 +88,30 @@ interrupts = <24>; clocks = <&hecc_ck>; }; + + /* + * On am3517 the OCP registers do not seem to be accessible + * similar to the omap34xx. Maybe SGX is permanently set to + * "OCP bypass mode", or maybe there is OCP_SYSCONFIG that is + * write-only at 0x50000e10. We detect SGX based on the SGX + * revision register instead of the unreadable OCP revision + * register. 
+ */ + sgx_module: target-module@50000000 { + compatible = "ti,sysc-omap2", "ti,sysc"; + reg = <0x50000014 0x4>; + reg-names = "rev"; + clocks = <&sgx_fck>, <&sgx_ick>; + clock-names = "fck", "ick"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x50000000 0x4000>; + + /* + * Closed source PowerVR driver, no child device + * binding or driver in mainline + */ + }; }; }; diff --git a/arch/arm/boot/dts/am437x-l4.dtsi b/arch/arm/boot/dts/am437x-l4.dtsi index 04bee4ff9dcb86..59770dd3785eed 100644 --- a/arch/arm/boot/dts/am437x-l4.dtsi +++ b/arch/arm/boot/dts/am437x-l4.dtsi @@ -512,7 +512,6 @@ target-module@100000 { /* 0x4a100000, ap 3 04.0 */ compatible = "ti,sysc-omap4-simple", "ti,sysc"; - ti,hwmods = "cpgmac0"; reg = <0x101200 0x4>, <0x101208 0x4>, <0x101204 0x4>; @@ -559,11 +558,10 @@ davinci_mdio: mdio@1000 { compatible = "ti,am4372-mdio","ti,cpsw-mdio","ti,davinci_mdio"; reg = <0x1000 0x100>; + clocks = <&cpsw_125mhz_clkctrl AM4_CPSW_125MHZ_CPGMAC0_CLKCTRL 0>; + clock-names = "fck"; #address-cells = <1>; #size-cells = <0>; - clocks = <&cpsw_125mhz_gclk>; - clock-names = "fck"; - ti,hwmods = "davinci_mdio"; bus_freq = <1000000>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/aspeed-ast2600-evb.dts b/arch/arm/boot/dts/aspeed-ast2600-evb.dts new file mode 100644 index 00000000000000..9870553919b7c4 --- /dev/null +++ b/arch/arm/boot/dts/aspeed-ast2600-evb.dts @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// Copyright 2019 IBM Corp. + +/dts-v1/; + +#include "aspeed-g6.dtsi" + +/ { + model = "AST2600 EVB"; + compatible = "aspeed,ast2600"; + + aliases { + serial4 = &uart5; + }; + + chosen { + bootargs = "console=ttyS4,115200n8"; + }; + + memory@80000000 { + device_type = "memory"; + reg = <0x80000000 0x80000000>; + }; +}; + +&mdio1 { + status = "okay"; + + ethphy1: ethernet-phy@0 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <0>; + }; +}; + +&mdio2 { + status = "okay"; + + ethphy2: ethernet-phy@0 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <0>; + }; +}; + +&mdio3 { + status = "okay"; + + ethphy3: ethernet-phy@0 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <0>; + }; +}; + +&mac1 { + status = "okay"; + + phy-mode = "rgmii"; + phy-handle = <ðphy1>; +}; + +&mac2 { + status = "okay"; + + phy-mode = "rgmii"; + phy-handle = <ðphy2>; +}; + +&mac3 { + status = "okay"; + + phy-mode = "rgmii"; + phy-handle = <ðphy3>; +}; + +&emmc { + status = "okay"; +}; + +&rtc { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi new file mode 100644 index 00000000000000..5b8bf58e89cb4a --- /dev/null +++ b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi @@ -0,0 +1,1154 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// Copyright 2019 IBM Corp. 
+ +&pinctrl { + pinctrl_adc0_default: adc0_default { + function = "ADC0"; + groups = "ADC0"; + }; + + pinctrl_adc1_default: adc1_default { + function = "ADC1"; + groups = "ADC1"; + }; + + pinctrl_adc10_default: adc10_default { + function = "ADC10"; + groups = "ADC10"; + }; + + pinctrl_adc11_default: adc11_default { + function = "ADC11"; + groups = "ADC11"; + }; + + pinctrl_adc12_default: adc12_default { + function = "ADC12"; + groups = "ADC12"; + }; + + pinctrl_adc13_default: adc13_default { + function = "ADC13"; + groups = "ADC13"; + }; + + pinctrl_adc14_default: adc14_default { + function = "ADC14"; + groups = "ADC14"; + }; + + pinctrl_adc15_default: adc15_default { + function = "ADC15"; + groups = "ADC15"; + }; + + pinctrl_adc2_default: adc2_default { + function = "ADC2"; + groups = "ADC2"; + }; + + pinctrl_adc3_default: adc3_default { + function = "ADC3"; + groups = "ADC3"; + }; + + pinctrl_adc4_default: adc4_default { + function = "ADC4"; + groups = "ADC4"; + }; + + pinctrl_adc5_default: adc5_default { + function = "ADC5"; + groups = "ADC5"; + }; + + pinctrl_adc6_default: adc6_default { + function = "ADC6"; + groups = "ADC6"; + }; + + pinctrl_adc7_default: adc7_default { + function = "ADC7"; + groups = "ADC7"; + }; + + pinctrl_adc8_default: adc8_default { + function = "ADC8"; + groups = "ADC8"; + }; + + pinctrl_adc9_default: adc9_default { + function = "ADC9"; + groups = "ADC9"; + }; + + pinctrl_bmcint_default: bmcint_default { + function = "BMCINT"; + groups = "BMCINT"; + }; + + pinctrl_espi_default: espi_default { + function = "ESPI"; + groups = "ESPI"; + }; + + pinctrl_espialt_default: espialt_default { + function = "ESPIALT"; + groups = "ESPIALT"; + }; + + pinctrl_fsi1_default: fsi1_default { + function = "FSI1"; + groups = "FSI1"; + }; + + pinctrl_fsi2_default: fsi2_default { + function = "FSI2"; + groups = "FSI2"; + }; + + pinctrl_fwspiabr_default: fwspiabr_default { + function = "FWSPIABR"; + groups = "FWSPIABR"; + }; + + pinctrl_fwspid_default: fwspid_default { + function = "FWSPID"; + groups = "FWSPID"; + }; + + pinctrl_fwqspid_default: fwqspid_default { + function = "FWQSPID"; + groups = "FWQSPID"; + }; + + pinctrl_fwspiwp_default: fwspiwp_default { + function = "FWSPIWP"; + groups = "FWSPIWP"; + }; + + pinctrl_gpit0_default: gpit0_default { + function = "GPIT0"; + groups = "GPIT0"; + }; + + pinctrl_gpit1_default: gpit1_default { + function = "GPIT1"; + groups = "GPIT1"; + }; + + pinctrl_gpit2_default: gpit2_default { + function = "GPIT2"; + groups = "GPIT2"; + }; + + pinctrl_gpit3_default: gpit3_default { + function = "GPIT3"; + groups = "GPIT3"; + }; + + pinctrl_gpit4_default: gpit4_default { + function = "GPIT4"; + groups = "GPIT4"; + }; + + pinctrl_gpit5_default: gpit5_default { + function = "GPIT5"; + groups = "GPIT5"; + }; + + pinctrl_gpit6_default: gpit6_default { + function = "GPIT6"; + groups = "GPIT6"; + }; + + pinctrl_gpit7_default: gpit7_default { + function = "GPIT7"; + groups = "GPIT7"; + }; + + pinctrl_gpiu0_default: gpiu0_default { + function = "GPIU0"; + groups = "GPIU0"; + }; + + pinctrl_gpiu1_default: gpiu1_default { + function = "GPIU1"; + groups = "GPIU1"; + }; + + pinctrl_gpiu2_default: gpiu2_default { + function = "GPIU2"; + groups = "GPIU2"; + }; + + pinctrl_gpiu3_default: gpiu3_default { + function = "GPIU3"; + groups = "GPIU3"; + }; + + pinctrl_gpiu4_default: gpiu4_default { + function = "GPIU4"; + groups = "GPIU4"; + }; + + pinctrl_gpiu5_default: gpiu5_default { + function = "GPIU5"; + groups = "GPIU5"; + }; + + pinctrl_gpiu6_default: gpiu6_default 
{ + function = "GPIU6"; + groups = "GPIU6"; + }; + + pinctrl_gpiu7_default: gpiu7_default { + function = "GPIU7"; + groups = "GPIU7"; + }; + + pinctrl_hvi3c3_default: hvi3c3_default { + function = "HVI3C3"; + groups = "HVI3C3"; + }; + + pinctrl_hvi3c4_default: hvi3c4_default { + function = "HVI3C4"; + groups = "HVI3C4"; + }; + + pinctrl_i2c1_default: i2c1_default { + function = "I2C1"; + groups = "I2C1"; + }; + + pinctrl_i2c10_default: i2c10_default { + function = "I2C10"; + groups = "I2C10"; + }; + + pinctrl_i2c11_default: i2c11_default { + function = "I2C11"; + groups = "I2C11"; + }; + + pinctrl_i2c12_default: i2c12_default { + function = "I2C12"; + groups = "I2C12"; + }; + + pinctrl_i2c13_default: i2c13_default { + function = "I2C13"; + groups = "I2C13"; + }; + + pinctrl_i2c14_default: i2c14_default { + function = "I2C14"; + groups = "I2C14"; + }; + + pinctrl_i2c15_default: i2c15_default { + function = "I2C15"; + groups = "I2C15"; + }; + + pinctrl_i2c16_default: i2c16_default { + function = "I2C16"; + groups = "I2C16"; + }; + + pinctrl_i2c2_default: i2c2_default { + function = "I2C2"; + groups = "I2C2"; + }; + + pinctrl_i2c3_default: i2c3_default { + function = "I2C3"; + groups = "I2C3"; + }; + + pinctrl_i2c4_default: i2c4_default { + function = "I2C4"; + groups = "I2C4"; + }; + + pinctrl_i2c5_default: i2c5_default { + function = "I2C5"; + groups = "I2C5"; + }; + + pinctrl_i2c6_default: i2c6_default { + function = "I2C6"; + groups = "I2C6"; + }; + + pinctrl_i2c7_default: i2c7_default { + function = "I2C7"; + groups = "I2C7"; + }; + + pinctrl_i2c8_default: i2c8_default { + function = "I2C8"; + groups = "I2C8"; + }; + + pinctrl_i2c9_default: i2c9_default { + function = "I2C9"; + groups = "I2C9"; + }; + + pinctrl_i3c3_default: i3c3_default { + function = "I3C3"; + groups = "I3C3"; + }; + + pinctrl_i3c4_default: i3c4_default { + function = "I3C4"; + groups = "I3C4"; + }; + + pinctrl_i3c5_default: i3c5_default { + function = "I3C5"; + groups = "I3C5"; + }; + + pinctrl_i3c6_default: i3c6_default { + function = "I3C6"; + groups = "I3C6"; + }; + + pinctrl_jtagm_default: jtagm_default { + function = "JTAGM"; + groups = "JTAGM"; + }; + + pinctrl_lhpd_default: lhpd_default { + function = "LHPD"; + groups = "LHPD"; + }; + + pinctrl_lhsirq_default: lhsirq_default { + function = "LHSIRQ"; + groups = "LHSIRQ"; + }; + + pinctrl_lpc_default: lpc_default { + function = "LPC"; + groups = "LPC"; + }; + + pinctrl_lpchc_default: lpchc_default { + function = "LPCHC"; + groups = "LPCHC"; + }; + + pinctrl_lpcpd_default: lpcpd_default { + function = "LPCPD"; + groups = "LPCPD"; + }; + + pinctrl_lpcpme_default: lpcpme_default { + function = "LPCPME"; + groups = "LPCPME"; + }; + + pinctrl_lpcsmi_default: lpcsmi_default { + function = "LPCSMI"; + groups = "LPCSMI"; + }; + + pinctrl_lsirq_default: lsirq_default { + function = "LSIRQ"; + groups = "LSIRQ"; + }; + + pinctrl_maclink1_default: maclink1_default { + function = "MACLINK1"; + groups = "MACLINK1"; + }; + + pinctrl_maclink2_default: maclink2_default { + function = "MACLINK2"; + groups = "MACLINK2"; + }; + + pinctrl_maclink3_default: maclink3_default { + function = "MACLINK3"; + groups = "MACLINK3"; + }; + + pinctrl_maclink4_default: maclink4_default { + function = "MACLINK4"; + groups = "MACLINK4"; + }; + + pinctrl_mdio1_default: mdio1_default { + function = "MDIO1"; + groups = "MDIO1"; + }; + + pinctrl_mdio2_default: mdio2_default { + function = "MDIO2"; + groups = "MDIO2"; + }; + + pinctrl_mdio3_default: mdio3_default { + function = "MDIO3"; + groups = 
"MDIO3"; + }; + + pinctrl_mdio4_default: mdio4_default { + function = "MDIO4"; + groups = "MDIO4"; + }; + + pinctrl_ncts1_default: ncts1_default { + function = "NCTS1"; + groups = "NCTS1"; + }; + + pinctrl_ncts2_default: ncts2_default { + function = "NCTS2"; + groups = "NCTS2"; + }; + + pinctrl_ncts3_default: ncts3_default { + function = "NCTS3"; + groups = "NCTS3"; + }; + + pinctrl_ncts4_default: ncts4_default { + function = "NCTS4"; + groups = "NCTS4"; + }; + + pinctrl_ndcd1_default: ndcd1_default { + function = "NDCD1"; + groups = "NDCD1"; + }; + + pinctrl_ndcd2_default: ndcd2_default { + function = "NDCD2"; + groups = "NDCD2"; + }; + + pinctrl_ndcd3_default: ndcd3_default { + function = "NDCD3"; + groups = "NDCD3"; + }; + + pinctrl_ndcd4_default: ndcd4_default { + function = "NDCD4"; + groups = "NDCD4"; + }; + + pinctrl_ndsr1_default: ndsr1_default { + function = "NDSR1"; + groups = "NDSR1"; + }; + + pinctrl_ndsr2_default: ndsr2_default { + function = "NDSR2"; + groups = "NDSR2"; + }; + + pinctrl_ndsr3_default: ndsr3_default { + function = "NDSR3"; + groups = "NDSR3"; + }; + + pinctrl_ndsr4_default: ndsr4_default { + function = "NDSR4"; + groups = "NDSR4"; + }; + + pinctrl_ndtr1_default: ndtr1_default { + function = "NDTR1"; + groups = "NDTR1"; + }; + + pinctrl_ndtr2_default: ndtr2_default { + function = "NDTR2"; + groups = "NDTR2"; + }; + + pinctrl_ndtr3_default: ndtr3_default { + function = "NDTR3"; + groups = "NDTR3"; + }; + + pinctrl_ndtr4_default: ndtr4_default { + function = "NDTR4"; + groups = "NDTR4"; + }; + + pinctrl_nri1_default: nri1_default { + function = "NRI1"; + groups = "NRI1"; + }; + + pinctrl_nri2_default: nri2_default { + function = "NRI2"; + groups = "NRI2"; + }; + + pinctrl_nri3_default: nri3_default { + function = "NRI3"; + groups = "NRI3"; + }; + + pinctrl_nri4_default: nri4_default { + function = "NRI4"; + groups = "NRI4"; + }; + + pinctrl_nrts1_default: nrts1_default { + function = "NRTS1"; + groups = "NRTS1"; + }; + + pinctrl_nrts2_default: nrts2_default { + function = "NRTS2"; + groups = "NRTS2"; + }; + + pinctrl_nrts3_default: nrts3_default { + function = "NRTS3"; + groups = "NRTS3"; + }; + + pinctrl_nrts4_default: nrts4_default { + function = "NRTS4"; + groups = "NRTS4"; + }; + + pinctrl_oscclk_default: oscclk_default { + function = "OSCCLK"; + groups = "OSCCLK"; + }; + + pinctrl_pewake_default: pewake_default { + function = "PEWAKE"; + groups = "PEWAKE"; + }; + + pinctrl_pwm0_default: pwm0_default { + function = "PWM0"; + groups = "PWM0"; + }; + + pinctrl_pwm1_default: pwm1_default { + function = "PWM1"; + groups = "PWM1"; + }; + + pinctrl_pwm10g0_default: pwm10g0_default { + function = "PWM10"; + groups = "PWM10G0"; + }; + + pinctrl_pwm10g1_default: pwm10g1_default { + function = "PWM10"; + groups = "PWM10G1"; + }; + + pinctrl_pwm11g0_default: pwm11g0_default { + function = "PWM11"; + groups = "PWM11G0"; + }; + + pinctrl_pwm11g1_default: pwm11g1_default { + function = "PWM11"; + groups = "PWM11G1"; + }; + + pinctrl_pwm12g0_default: pwm12g0_default { + function = "PWM12"; + groups = "PWM12G0"; + }; + + pinctrl_pwm12g1_default: pwm12g1_default { + function = "PWM12"; + groups = "PWM12G1"; + }; + + pinctrl_pwm13g0_default: pwm13g0_default { + function = "PWM13"; + groups = "PWM13G0"; + }; + + pinctrl_pwm13g1_default: pwm13g1_default { + function = "PWM13"; + groups = "PWM13G1"; + }; + + pinctrl_pwm14g0_default: pwm14g0_default { + function = "PWM14"; + groups = "PWM14G0"; + }; + + pinctrl_pwm14g1_default: pwm14g1_default { + function = "PWM14"; + groups = 
"PWM14G1"; + }; + + pinctrl_pwm15g0_default: pwm15g0_default { + function = "PWM15"; + groups = "PWM15G0"; + }; + + pinctrl_pwm15g1_default: pwm15g1_default { + function = "PWM15"; + groups = "PWM15G1"; + }; + + pinctrl_pwm2_default: pwm2_default { + function = "PWM2"; + groups = "PWM2"; + }; + + pinctrl_pwm3_default: pwm3_default { + function = "PWM3"; + groups = "PWM3"; + }; + + pinctrl_pwm4_default: pwm4_default { + function = "PWM4"; + groups = "PWM4"; + }; + + pinctrl_pwm5_default: pwm5_default { + function = "PWM5"; + groups = "PWM5"; + }; + + pinctrl_pwm6_default: pwm6_default { + function = "PWM6"; + groups = "PWM6"; + }; + + pinctrl_pwm7_default: pwm7_default { + function = "PWM7"; + groups = "PWM7"; + }; + + pinctrl_pwm8g0_default: pwm8g0_default { + function = "PWM8"; + groups = "PWM8G0"; + }; + + pinctrl_pwm8g1_default: pwm8g1_default { + function = "PWM8"; + groups = "PWM8G1"; + }; + + pinctrl_pwm9g0_default: pwm9g0_default { + function = "PWM9"; + groups = "PWM9G0"; + }; + + pinctrl_pwm9g1_default: pwm9g1_default { + function = "PWM9"; + groups = "PWM9G1"; + }; + + pinctrl_qspi1_default: qspi1_default { + function = "QSPI1"; + groups = "QSPI1"; + }; + + pinctrl_qspi2_default: qspi2_default { + function = "QSPI2"; + groups = "QSPI2"; + }; + + pinctrl_rgmii1_default: rgmii1_default { + function = "RGMII1"; + groups = "RGMII1"; + }; + + pinctrl_rgmii2_default: rgmii2_default { + function = "RGMII2"; + groups = "RGMII2"; + }; + + pinctrl_rgmii3_default: rgmii3_default { + function = "RGMII3"; + groups = "RGMII3"; + }; + + pinctrl_rgmii4_default: rgmii4_default { + function = "RGMII4"; + groups = "RGMII4"; + }; + + pinctrl_rmii1_default: rmii1_default { + function = "RMII1"; + groups = "RMII1"; + }; + + pinctrl_rmii2_default: rmii2_default { + function = "RMII2"; + groups = "RMII2"; + }; + + pinctrl_rmii3_default: rmii3_default { + function = "RMII3"; + groups = "RMII3"; + }; + + pinctrl_rmii4_default: rmii4_default { + function = "RMII4"; + groups = "RMII4"; + }; + + pinctrl_rxd1_default: rxd1_default { + function = "RXD1"; + groups = "RXD1"; + }; + + pinctrl_rxd2_default: rxd2_default { + function = "RXD2"; + groups = "RXD2"; + }; + + pinctrl_rxd3_default: rxd3_default { + function = "RXD3"; + groups = "RXD3"; + }; + + pinctrl_rxd4_default: rxd4_default { + function = "RXD4"; + groups = "RXD4"; + }; + + pinctrl_salt1_default: salt1_default { + function = "SALT1"; + groups = "SALT1"; + }; + + pinctrl_salt10g0_default: salt10g0_default { + function = "SALT10"; + groups = "SALT10G0"; + }; + + pinctrl_salt10g1_default: salt10g1_default { + function = "SALT10"; + groups = "SALT10G1"; + }; + + pinctrl_salt11g0_default: salt11g0_default { + function = "SALT11"; + groups = "SALT11G0"; + }; + + pinctrl_salt11g1_default: salt11g1_default { + function = "SALT11"; + groups = "SALT11G1"; + }; + + pinctrl_salt12g0_default: salt12g0_default { + function = "SALT12"; + groups = "SALT12G0"; + }; + + pinctrl_salt12g1_default: salt12g1_default { + function = "SALT12"; + groups = "SALT12G1"; + }; + + pinctrl_salt13g0_default: salt13g0_default { + function = "SALT13"; + groups = "SALT13G0"; + }; + + pinctrl_salt13g1_default: salt13g1_default { + function = "SALT13"; + groups = "SALT13G1"; + }; + + pinctrl_salt14g0_default: salt14g0_default { + function = "SALT14"; + groups = "SALT14G0"; + }; + + pinctrl_salt14g1_default: salt14g1_default { + function = "SALT14"; + groups = "SALT14G1"; + }; + + pinctrl_salt15g0_default: salt15g0_default { + function = "SALT15"; + groups = "SALT15G0"; + }; + + 
pinctrl_salt15g1_default: salt15g1_default { + function = "SALT15"; + groups = "SALT15G1"; + }; + + pinctrl_salt16g0_default: salt16g0_default { + function = "SALT16"; + groups = "SALT16G0"; + }; + + pinctrl_salt16g1_default: salt16g1_default { + function = "SALT16"; + groups = "SALT16G1"; + }; + + pinctrl_salt2_default: salt2_default { + function = "SALT2"; + groups = "SALT2"; + }; + + pinctrl_salt3_default: salt3_default { + function = "SALT3"; + groups = "SALT3"; + }; + + pinctrl_salt4_default: salt4_default { + function = "SALT4"; + groups = "SALT4"; + }; + + pinctrl_salt5_default: salt5_default { + function = "SALT5"; + groups = "SALT5"; + }; + + pinctrl_salt6_default: salt6_default { + function = "SALT6"; + groups = "SALT6"; + }; + + pinctrl_salt7_default: salt7_default { + function = "SALT7"; + groups = "SALT7"; + }; + + pinctrl_salt8_default: salt8_default { + function = "SALT8"; + groups = "SALT8"; + }; + + pinctrl_salt9g0_default: salt9g0_default { + function = "SALT9"; + groups = "SALT9G0"; + }; + + pinctrl_salt9g1_default: salt9g1_default { + function = "SALT9"; + groups = "SALT9G1"; + }; + + pinctrl_sd1_default: sd1_default { + function = "SD1"; + groups = "SD1"; + }; + + pinctrl_sd2_default: sd2_default { + function = "SD2"; + groups = "SD2"; + }; + + pinctrl_sd3_default: sd3_default { + function = "SD3"; + groups = "SD3"; + }; + + pinctrl_emmc_default: emmc_default { + function = "SD3"; + groups = "EMMC"; + }; + + pinctrl_sgpm1_default: sgpm1_default { + function = "SGPM1"; + groups = "SGPM1"; + }; + + pinctrl_sgps1_default: sgps1_default { + function = "SGPS1"; + groups = "SGPS1"; + }; + + pinctrl_sioonctrl_default: sioonctrl_default { + function = "SIOONCTRL"; + groups = "SIOONCTRL"; + }; + + pinctrl_siopbi_default: siopbi_default { + function = "SIOPBI"; + groups = "SIOPBI"; + }; + + pinctrl_siopbo_default: siopbo_default { + function = "SIOPBO"; + groups = "SIOPBO"; + }; + + pinctrl_siopwreq_default: siopwreq_default { + function = "SIOPWREQ"; + groups = "SIOPWREQ"; + }; + + pinctrl_siopwrgd_default: siopwrgd_default { + function = "SIOPWRGD"; + groups = "SIOPWRGD"; + }; + + pinctrl_sios3_default: sios3_default { + function = "SIOS3"; + groups = "SIOS3"; + }; + + pinctrl_sios5_default: sios5_default { + function = "SIOS5"; + groups = "SIOS5"; + }; + + pinctrl_siosci_default: siosci_default { + function = "SIOSCI"; + groups = "SIOSCI"; + }; + + pinctrl_spi1_default: spi1_default { + function = "SPI1"; + groups = "SPI1"; + }; + + pinctrl_spi1abr_default: spi1abr_default { + function = "SPI1ABR"; + groups = "SPI1ABR"; + }; + + pinctrl_spi1cs1_default: spi1cs1_default { + function = "SPI1CS1"; + groups = "SPI1CS1"; + }; + + pinctrl_spi1wp_default: spi1wp_default { + function = "SPI1WP"; + groups = "SPI1WP"; + }; + + pinctrl_spi2_default: spi2_default { + function = "SPI2"; + groups = "SPI2"; + }; + + pinctrl_spi2cs1_default: spi2cs1_default { + function = "SPI2CS1"; + groups = "SPI2CS1"; + }; + + pinctrl_spi2cs2_default: spi2cs2_default { + function = "SPI2CS2"; + groups = "SPI2CS2"; + }; + + pinctrl_tach0_default: tach0_default { + function = "TACH0"; + groups = "TACH0"; + }; + + pinctrl_tach1_default: tach1_default { + function = "TACH1"; + groups = "TACH1"; + }; + + pinctrl_tach10_default: tach10_default { + function = "TACH10"; + groups = "TACH10"; + }; + + pinctrl_tach11_default: tach11_default { + function = "TACH11"; + groups = "TACH11"; + }; + + pinctrl_tach12_default: tach12_default { + function = "TACH12"; + groups = "TACH12"; + }; + + pinctrl_tach13_default: 
tach13_default { + function = "TACH13"; + groups = "TACH13"; + }; + + pinctrl_tach14_default: tach14_default { + function = "TACH14"; + groups = "TACH14"; + }; + + pinctrl_tach15_default: tach15_default { + function = "TACH15"; + groups = "TACH15"; + }; + + pinctrl_tach2_default: tach2_default { + function = "TACH2"; + groups = "TACH2"; + }; + + pinctrl_tach3_default: tach3_default { + function = "TACH3"; + groups = "TACH3"; + }; + + pinctrl_tach4_default: tach4_default { + function = "TACH4"; + groups = "TACH4"; + }; + + pinctrl_tach5_default: tach5_default { + function = "TACH5"; + groups = "TACH5"; + }; + + pinctrl_tach6_default: tach6_default { + function = "TACH6"; + groups = "TACH6"; + }; + + pinctrl_tach7_default: tach7_default { + function = "TACH7"; + groups = "TACH7"; + }; + + pinctrl_tach8_default: tach8_default { + function = "TACH8"; + groups = "TACH8"; + }; + + pinctrl_tach9_default: tach9_default { + function = "TACH9"; + groups = "TACH9"; + }; + + pinctrl_thru0_default: thru0_default { + function = "THRU0"; + groups = "THRU0"; + }; + + pinctrl_thru1_default: thru1_default { + function = "THRU1"; + groups = "THRU1"; + }; + + pinctrl_thru2_default: thru2_default { + function = "THRU2"; + groups = "THRU2"; + }; + + pinctrl_thru3_default: thru3_default { + function = "THRU3"; + groups = "THRU3"; + }; + + pinctrl_txd1_default: txd1_default { + function = "TXD1"; + groups = "TXD1"; + }; + + pinctrl_txd2_default: txd2_default { + function = "TXD2"; + groups = "TXD2"; + }; + + pinctrl_txd3_default: txd3_default { + function = "TXD3"; + groups = "TXD3"; + }; + + pinctrl_txd4_default: txd4_default { + function = "TXD4"; + groups = "TXD4"; + }; + + pinctrl_uart10_default: uart10_default { + function = "UART10"; + groups = "UART10"; + }; + + pinctrl_uart11_default: uart11_default { + function = "UART11"; + groups = "UART11"; + }; + + pinctrl_uart12g0_default: uart12g0_default { + function = "UART12"; + groups = "UART12G0"; + }; + + pinctrl_uart12g1_default: uart12g1_default { + function = "UART12"; + groups = "UART12G1"; + }; + + pinctrl_uart13g0_default: uart13g0_default { + function = "UART13"; + groups = "UART13G0"; + }; + + pinctrl_uart13g1_default: uart13g1_default { + function = "UART13"; + groups = "UART13G1"; + }; + + pinctrl_uart6_default: uart6_default { + function = "UART6"; + groups = "UART6"; + }; + + pinctrl_uart7_default: uart7_default { + function = "UART7"; + groups = "UART7"; + }; + + pinctrl_uart8_default: uart8_default { + function = "UART8"; + groups = "UART8"; + }; + + pinctrl_uart9_default: uart9_default { + function = "UART9"; + groups = "UART9"; + }; + + pinctrl_vb_default: vb_default { + function = "VB"; + groups = "VB"; + }; + + pinctrl_vgahs_default: vgahs_default { + function = "VGAHS"; + groups = "VGAHS"; + }; + + pinctrl_vgavs_default: vgavs_default { + function = "VGAVS"; + groups = "VGAVS"; + }; + + pinctrl_wdtrst1_default: wdtrst1_default { + function = "WDTRST1"; + groups = "WDTRST1"; + }; + + pinctrl_wdtrst2_default: wdtrst2_default { + function = "WDTRST2"; + groups = "WDTRST2"; + }; + + pinctrl_wdtrst3_default: wdtrst3_default { + function = "WDTRST3"; + groups = "WDTRST3"; + }; + + pinctrl_wdtrst4_default: wdtrst4_default { + function = "WDTRST4"; + groups = "WDTRST4"; + }; +}; diff --git a/arch/arm/boot/dts/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed-g6.dtsi new file mode 100644 index 00000000000000..3a1422f7c49cce --- /dev/null +++ b/arch/arm/boot/dts/aspeed-g6.dtsi @@ -0,0 +1,261 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// Copyright 2019 
IBM Corp. + +#include +#include + +/ { + model = "Aspeed BMC"; + compatible = "aspeed,ast2600"; + #address-cells = <1>; + #size-cells = <1>; + interrupt-parent = <&gic>; + + aliases { + serial4 = &uart5; + }; + + + cpus { + #address-cells = <1>; + #size-cells = <0>; + enable-method = "aspeed,ast2600-smp"; + + cpu@f00 { + compatible = "arm,cortex-a7"; + device_type = "cpu"; + reg = <0xf00>; + }; + + cpu@f01 { + compatible = "arm,cortex-a7"; + device_type = "cpu"; + reg = <0xf01>; + }; + }; + + timer { + compatible = "arm,armv7-timer"; + interrupt-parent = <&gic>; + interrupts = , + , + , + ; + clocks = <&syscon ASPEED_CLK_HPLL>; + arm,cpu-registers-not-fw-configured; + }; + + ahb { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + ranges; + + gic: interrupt-controller@40461000 { + compatible = "arm,cortex-a7-gic"; + interrupts = ; + #interrupt-cells = <3>; + interrupt-controller; + interrupt-parent = <&gic>; + reg = <0x40461000 0x1000>, + <0x40462000 0x1000>, + <0x40464000 0x2000>, + <0x40466000 0x2000>; + }; + + mdio0: mdio@1e650000 { + compatible = "aspeed,ast2600-mdio"; + reg = <0x1e650000 0x8>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + mdio1: mdio@1e650008 { + compatible = "aspeed,ast2600-mdio"; + reg = <0x1e650008 0x8>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + mdio2: mdio@1e650010 { + compatible = "aspeed,ast2600-mdio"; + reg = <0x1e650010 0x8>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + mdio3: mdio@1e650018 { + compatible = "aspeed,ast2600-mdio"; + reg = <0x1e650018 0x8>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + mac0: ftgmac@1e660000 { + compatible = "aspeed,ast2600-mac", "faraday,ftgmac100"; + reg = <0x1e660000 0x180>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = ; + clocks = <&syscon ASPEED_CLK_GATE_MAC1CLK>; + status = "disabled"; + }; + + mac1: ftgmac@1e680000 { + compatible = "aspeed,ast2600-mac", "faraday,ftgmac100"; + reg = <0x1e680000 0x180>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = ; + clocks = <&syscon ASPEED_CLK_GATE_MAC2CLK>; + status = "disabled"; + }; + + mac2: ftgmac@1e670000 { + compatible = "aspeed,ast2600-mac", "faraday,ftgmac100"; + reg = <0x1e670000 0x180>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = ; + clocks = <&syscon ASPEED_CLK_GATE_MAC3CLK>; + status = "disabled"; + }; + + mac3: ftgmac@1e690000 { + compatible = "aspeed,ast2600-mac", "faraday,ftgmac100"; + reg = <0x1e690000 0x180>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = ; + clocks = <&syscon ASPEED_CLK_GATE_MAC4CLK>; + status = "disabled"; + }; + + apb { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges; + + syscon: syscon@1e6e2000 { + compatible = "aspeed,ast2600-scu", "syscon", "simple-mfd"; + reg = <0x1e6e2000 0x1000>; + ranges = <0 0x1e6e2000 0x1000>; + #address-cells = <1>; + #size-cells = <1>; + #clock-cells = <1>; + #reset-cells = <1>; + + pinctrl: pinctrl { + compatible = "aspeed,ast2600-pinctrl"; + }; + + smp-memram@180 { + compatible = "aspeed,ast2600-smpmem"; + reg = <0x180 0x40>; + }; + }; + + rng: hwrng@1e6e2524 { + compatible = "timeriomem_rng"; + reg = <0x1e6e2524 0x4>; + period = <1>; + quality = <100>; + }; + + rtc: rtc@1e781000 { + compatible = "aspeed,ast2600-rtc"; + reg = <0x1e781000 0x18>; + interrupts = ; + status = "disabled"; + }; + + uart5: serial@1e784000 { + compatible = "ns16550a"; + reg = 
<0x1e784000 0x1000>; + reg-shift = <2>; + interrupts = ; + clocks = <&syscon ASPEED_CLK_GATE_UART5CLK>; + no-loopback-test; + }; + + wdt1: watchdog@1e785000 { + compatible = "aspeed,ast2600-wdt"; + reg = <0x1e785000 0x40>; + }; + + wdt2: watchdog@1e785040 { + compatible = "aspeed,ast2600-wdt"; + reg = <0x1e785040 0x40>; + status = "disabled"; + }; + + wdt3: watchdog@1e785080 { + compatible = "aspeed,ast2600-wdt"; + reg = <0x1e785080 0x40>; + status = "disabled"; + }; + + wdt4: watchdog@1e7850C0 { + compatible = "aspeed,ast2600-wdt"; + reg = <0x1e7850C0 0x40>; + status = "disabled"; + }; + + sdc: sdc@1e740000 { + compatible = "aspeed,ast2600-sd-controller"; + reg = <0x1e740000 0x100>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x1e740000 0x10000>; + clocks = <&syscon ASPEED_CLK_GATE_SDCLK>; + status = "disabled"; + + sdhci0: sdhci@1e740100 { + compatible = "aspeed,ast2600-sdhci", "sdhci"; + reg = <0x100 0x100>; + interrupts = ; + sdhci,auto-cmd12; + clocks = <&syscon ASPEED_CLK_SDIO>; + status = "disabled"; + }; + + sdhci1: sdhci@1e740200 { + compatible = "aspeed,ast2600-sdhci", "sdhci"; + reg = <0x200 0x100>; + interrupts = ; + sdhci,auto-cmd12; + clocks = <&syscon ASPEED_CLK_SDIO>; + status = "disabled"; + }; + }; + + emmc: sdc@1e750000 { + compatible = "aspeed,ast2600-sd-controller"; + reg = <0x1e750000 0x100>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x1e750000 0x10000>; + clocks = <&syscon ASPEED_CLK_GATE_EMMCCLK>; + status = "disabled"; + + sdhci@1e750100 { + compatible = "aspeed,ast2600-sdhci"; + reg = <0x100 0x100>; + sdhci,auto-cmd12; + interrupts = ; + clocks = <&syscon ASPEED_CLK_EMMC>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_emmc_default>; + }; + }; + }; + }; +}; + +#include "aspeed-g6-pinctrl.dtsi" diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi index 21e5914fdd6209..ea0e7c19eb4e3e 100644 --- a/arch/arm/boot/dts/dra7-l4.dtsi +++ b/arch/arm/boot/dts/dra7-l4.dtsi @@ -1118,7 +1118,6 @@ target-module@20000 { /* 0x48020000, ap 3 04.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "uart3"; reg = <0x20050 0x4>, <0x20054 0x4>, <0x20058 0x4>; @@ -1263,7 +1262,6 @@ gpio7_target: target-module@51000 { /* 0x48051000, ap 45 2e.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "gpio7"; reg = <0x51000 0x4>, <0x51010 0x4>, <0x51114 0x4>; @@ -1297,7 +1295,6 @@ target-module@53000 { /* 0x48053000, ap 35 36.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "gpio8"; reg = <0x53000 0x4>, <0x53010 0x4>, <0x53114 0x4>; @@ -1331,7 +1328,6 @@ target-module@55000 { /* 0x48055000, ap 13 0e.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "gpio2"; reg = <0x55000 0x4>, <0x55010 0x4>, <0x55114 0x4>; @@ -1365,7 +1361,6 @@ target-module@57000 { /* 0x48057000, ap 15 06.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "gpio3"; reg = <0x57000 0x4>, <0x57010 0x4>, <0x57114 0x4>; @@ -1399,7 +1394,6 @@ target-module@59000 { /* 0x48059000, ap 17 16.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "gpio4"; reg = <0x59000 0x4>, <0x59010 0x4>, <0x59114 0x4>; @@ -1433,7 +1427,6 @@ target-module@5b000 { /* 0x4805b000, ap 19 1e.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "gpio5"; reg = <0x5b000 0x4>, <0x5b010 0x4>, <0x5b114 0x4>; @@ -1467,7 +1460,6 @@ target-module@5d000 { /* 0x4805d000, ap 21 26.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "gpio6"; reg = <0x5d000 0x4>, <0x5d010 0x4>, <0x5d114 0x4>; @@ -1501,7 +1493,6 @@ target-module@60000 { /* 
0x48060000, ap 23 32.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "i2c3"; reg = <0x60000 0x8>, <0x60010 0x8>, <0x60090 0x8>; @@ -1534,7 +1525,6 @@ target-module@66000 { /* 0x48066000, ap 63 14.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "uart5"; reg = <0x66050 0x4>, <0x66054 0x4>, <0x66058 0x4>; @@ -1567,7 +1557,6 @@ target-module@68000 { /* 0x48068000, ap 53 1c.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "uart6"; reg = <0x68050 0x4>, <0x68054 0x4>, <0x68058 0x4>; @@ -1600,7 +1589,6 @@ target-module@6a000 { /* 0x4806a000, ap 24 24.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "uart1"; reg = <0x6a050 0x4>, <0x6a054 0x4>, <0x6a058 0x4>; @@ -1633,7 +1621,6 @@ target-module@6c000 { /* 0x4806c000, ap 26 2c.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "uart2"; reg = <0x6c050 0x4>, <0x6c054 0x4>, <0x6c058 0x4>; @@ -1666,7 +1653,6 @@ target-module@6e000 { /* 0x4806e000, ap 28 0c.1 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "uart4"; reg = <0x6e050 0x4>, <0x6e054 0x4>, <0x6e058 0x4>; @@ -1699,7 +1685,6 @@ target-module@70000 { /* 0x48070000, ap 30 22.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "i2c1"; reg = <0x70000 0x8>, <0x70010 0x8>, <0x70090 0x8>; @@ -1732,7 +1717,6 @@ target-module@72000 { /* 0x48072000, ap 32 2a.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "i2c2"; reg = <0x72000 0x8>, <0x72010 0x8>, <0x72090 0x8>; @@ -1795,7 +1779,6 @@ target-module@7a000 { /* 0x4807a000, ap 81 3a.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "i2c4"; reg = <0x7a000 0x8>, <0x7a010 0x8>, <0x7a090 0x8>; @@ -1828,7 +1811,6 @@ target-module@7c000 { /* 0x4807c000, ap 83 4a.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "i2c5"; reg = <0x7c000 0x8>, <0x7c010 0x8>, <0x7c090 0x8>; @@ -1942,7 +1924,6 @@ target-module@98000 { /* 0x48098000, ap 47 08.0 */ compatible = "ti,sysc-omap4", "ti,sysc"; - ti,hwmods = "mcspi1"; reg = <0x98000 0x4>, <0x98010 0x4>; reg-names = "rev", "sysc"; @@ -1982,7 +1963,6 @@ target-module@9a000 { /* 0x4809a000, ap 49 10.0 */ compatible = "ti,sysc-omap4", "ti,sysc"; - ti,hwmods = "mcspi2"; reg = <0x9a000 0x4>, <0x9a010 0x4>; reg-names = "rev", "sysc"; @@ -2017,7 +1997,6 @@ target-module@9c000 { /* 0x4809c000, ap 51 38.0 */ compatible = "ti,sysc-omap4", "ti,sysc"; - ti,hwmods = "mmc1"; reg = <0x9c000 0x4>, <0x9c010 0x4>; reg-names = "rev", "sysc"; @@ -2077,7 +2056,6 @@ target-module@ad000 { /* 0x480ad000, ap 61 20.0 */ compatible = "ti,sysc-omap4", "ti,sysc"; - ti,hwmods = "mmc3"; reg = <0xad000 0x4>, <0xad010 0x4>; reg-names = "rev", "sysc"; @@ -2137,7 +2115,6 @@ target-module@b4000 { /* 0x480b4000, ap 65 40.0 */ compatible = "ti,sysc-omap4", "ti,sysc"; - ti,hwmods = "mmc2"; reg = <0xb4000 0x4>, <0xb4010 0x4>; reg-names = "rev", "sysc"; @@ -2174,7 +2151,6 @@ target-module@b8000 { /* 0x480b8000, ap 67 48.0 */ compatible = "ti,sysc-omap4", "ti,sysc"; - ti,hwmods = "mcspi3"; reg = <0xb8000 0x4>, <0xb8010 0x4>; reg-names = "rev", "sysc"; @@ -2206,7 +2182,6 @@ target-module@ba000 { /* 0x480ba000, ap 69 18.0 */ compatible = "ti,sysc-omap4", "ti,sysc"; - ti,hwmods = "mcspi4"; reg = <0xba000 0x4>, <0xba010 0x4>; reg-names = "rev", "sysc"; @@ -2238,7 +2213,6 @@ target-module@d1000 { /* 0x480d1000, ap 71 28.0 */ compatible = "ti,sysc-omap4", "ti,sysc"; - ti,hwmods = "mmc4"; reg = <0xd1000 0x4>, <0xd1010 0x4>; reg-names = "rev", "sysc"; @@ -2384,7 +2358,6 @@ target-module@20000 { /* 0x48420000, ap 47 02.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods 
= "uart7"; reg = <0x20050 0x4>, <0x20054 0x4>, <0x20058 0x4>; @@ -2415,7 +2388,6 @@ target-module@22000 { /* 0x48422000, ap 49 0a.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "uart8"; reg = <0x22050 0x4>, <0x22054 0x4>, <0x22058 0x4>; @@ -2446,7 +2418,6 @@ target-module@24000 { /* 0x48424000, ap 51 12.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "uart9"; reg = <0x24050 0x4>, <0x24054 0x4>, <0x24058 0x4>; @@ -2735,7 +2706,6 @@ target-module@60000 { /* 0x48460000, ap 9 0e.0 */ compatible = "ti,sysc-dra7-mcasp", "ti,sysc"; - ti,hwmods = "mcasp1"; reg = <0x60000 0x4>, <0x60004 0x4>; reg-names = "rev", "sysc"; @@ -2772,7 +2742,6 @@ target-module@64000 { /* 0x48464000, ap 11 1e.0 */ compatible = "ti,sysc-dra7-mcasp", "ti,sysc"; - ti,hwmods = "mcasp2"; reg = <0x64000 0x4>, <0x64004 0x4>; reg-names = "rev", "sysc"; @@ -2809,7 +2778,6 @@ target-module@68000 { /* 0x48468000, ap 13 26.0 */ compatible = "ti,sysc-dra7-mcasp", "ti,sysc"; - ti,hwmods = "mcasp3"; reg = <0x68000 0x4>, <0x68004 0x4>; reg-names = "rev", "sysc"; @@ -2845,7 +2813,6 @@ target-module@6c000 { /* 0x4846c000, ap 15 2e.0 */ compatible = "ti,sysc-dra7-mcasp", "ti,sysc"; - ti,hwmods = "mcasp4"; reg = <0x6c000 0x4>, <0x6c004 0x4>; reg-names = "rev", "sysc"; @@ -2881,7 +2848,6 @@ target-module@70000 { /* 0x48470000, ap 19 36.0 */ compatible = "ti,sysc-dra7-mcasp", "ti,sysc"; - ti,hwmods = "mcasp5"; reg = <0x70000 0x4>, <0x70004 0x4>; reg-names = "rev", "sysc"; @@ -2917,7 +2883,6 @@ target-module@74000 { /* 0x48474000, ap 35 14.0 */ compatible = "ti,sysc-dra7-mcasp", "ti,sysc"; - ti,hwmods = "mcasp6"; reg = <0x74000 0x4>, <0x74004 0x4>; reg-names = "rev", "sysc"; @@ -2953,7 +2918,6 @@ target-module@78000 { /* 0x48478000, ap 39 0c.0 */ compatible = "ti,sysc-dra7-mcasp", "ti,sysc"; - ti,hwmods = "mcasp7"; reg = <0x78000 0x4>, <0x78004 0x4>; reg-names = "rev", "sysc"; @@ -2989,7 +2953,6 @@ target-module@7c000 { /* 0x4847c000, ap 43 04.0 */ compatible = "ti,sysc-dra7-mcasp", "ti,sysc"; - ti,hwmods = "mcasp8"; reg = <0x7c000 0x4>, <0x7c004 0x4>; reg-names = "rev", "sysc"; @@ -3045,7 +3008,6 @@ target-module@84000 { /* 0x48484000, ap 3 10.0 */ compatible = "ti,sysc-omap4-simple", "ti,sysc"; - ti,hwmods = "gmac"; reg = <0x85200 0x4>, <0x85208 0x4>, <0x85204 0x4>; @@ -3103,9 +3065,10 @@ davinci_mdio: mdio@1000 { compatible = "ti,cpsw-mdio","ti,davinci_mdio"; + clocks = <&gmac_clkctrl DRA7_GMAC_GMAC_CLKCTRL 0>; + clock-names = "fck"; #address-cells = <1>; #size-cells = <0>; - ti,hwmods = "davinci_mdio"; bus_freq = <1000000>; reg = <0x1000 0x100>; }; @@ -4311,7 +4274,6 @@ target-module@0 { /* 0x4ae10000, ap 5 20.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "gpio1"; reg = <0x0 0x4>, <0x10 0x4>, <0x114 0x4>; @@ -4479,7 +4441,6 @@ target-module@b000 { /* 0x4ae2b000, ap 28 02.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "uart10"; reg = <0xb050 0x4>, <0xb054 0x4>, <0xb058 0x4>; diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts index bfaa2de63a1001..e2030ba16512f5 100644 --- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts +++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts @@ -72,7 +72,6 @@ reg = <0>; /* 50 ns min period = 20 MHz */ spi-max-frequency = <20000000>; - spi-cpol; /* Clock active low */ vcc-supply = <&vdisp>; iovcc-supply = <&vdisp>; vci-supply = <&vdisp>; diff --git a/arch/arm/boot/dts/mmp2-olpc-xo-1-75.dts b/arch/arm/boot/dts/mmp2-olpc-xo-1-75.dts new file mode 100644 index 00000000000000..6cfa0d4a18845c --- /dev/null +++ 
b/arch/arm/boot/dts/mmp2-olpc-xo-1-75.dts @@ -0,0 +1,244 @@ +// SPDX-License-Identifier: GPL-2.0-or-later OR MIT +/* + * OLPC XO 1.75 Laptop. + * + * Copyright (C) 2018,2019 Lubomir Rintel + */ + +/dts-v1/; +#include "mmp2.dtsi" +#include +#include +#include + +/ { + model = "OLPC XO-1.75"; + compatible = "olpc,xo-1.75", "mrvl,mmp2"; + + chosen { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + framebuffer@1fc00000 { + compatible = "simple-framebuffer"; + reg = <0x1fc00000 (1200 * 900 * 2)>; + width = <1200>; + height = <900>; + stride = <(1200 * 2)>; + format = "r5g6b5"; + clocks = <&soc_clocks MMP2_CLK_DISP0_LCDC>, + <&soc_clocks MMP2_CLK_DISP0>; + }; + }; + + memory { + linux,usable-memory = <0x0 0x1f800000>; + available = <0xcf000 0x1ef31000 0x1000 0xbf000>; + reg = <0x0 0x20000000>; + device_type = "memory"; + }; + + gpio-keys { + compatible = "gpio-keys"; + + lid { + label = "Lid"; + gpios = <&gpio 129 GPIO_ACTIVE_LOW>; + linux,input-type = ; + linux,code = ; + wakeup-source; + }; + + tablet_mode { + label = "E-Book Mode"; + gpios = <&gpio 128 GPIO_ACTIVE_LOW>; + linux,input-type = ; + linux,code = ; + wakeup-source; + }; + + microphone_insert { + label = "Microphone Plug"; + gpios = <&gpio 96 GPIO_ACTIVE_HIGH>; + linux,input-type = ; + linux,code = ; + debounce-interval = <100>; + wakeup-source; + }; + + headphone_insert { + label = "Headphone Plug"; + gpios = <&gpio 97 GPIO_ACTIVE_HIGH>; + linux,input-type = ; + linux,code = ; + debounce-interval = <100>; + wakeup-source; + }; + }; + + camera_i2c { + compatible = "i2c-gpio"; + gpios = <&gpio 109 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>, + <&gpio 108 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; + #address-cells = <1>; + #size-cells = <0>; + i2c-gpio,timeout-ms = <1000>; + status = "okay"; + + camera@21 { + compatible = "ovti,ov7670"; + reg = <0x21>; + reset-gpios = <&gpio 102 GPIO_ACTIVE_LOW>; + powerdown-gpios = <&gpio 150 GPIO_ACTIVE_LOW>; + clocks = <&camera0>; + clock-names = "xclk"; + + port { + ov7670_0: endpoint { + hsync-active = <1>; + vsync-active = <1>; + remote-endpoint = <&camera0_0>; + }; + }; + }; + }; + + battery { + compatible = "olpc,xo1.5-battery", "olpc,xo1-battery"; + }; + + wlan_reg: fixedregulator0 { + compatible = "regulator-fixed"; + regulator-name = "wlan"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio 34 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; + + wlan_pwrseq: pwrseq0 { + compatible = "mmc-pwrseq-sd8787"; + powerdown-gpios = <&gpio 57 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpio 58 GPIO_ACTIVE_HIGH>; + }; + + soc { + axi@d4200000 { + ap-sp@d4290000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "olpc,ap-sp"; + interrupts = <40>; + reg = <0xd4290000 0x1000>; + data-gpios = <&gpio 72 GPIO_ACTIVE_HIGH>; + clk-gpios = <&gpio 71 GPIO_ACTIVE_HIGH>; + status = "okay"; + }; + }; + }; +}; + +&uart3 { + status = "okay"; +}; + +&uart4 { + status = "okay"; +}; + +&rtc { + status = "okay"; +}; + +&usb_phy0 { + status = "okay"; +}; + +&usb_otg0 { + status = "okay"; +}; + +&mmc1 { + clock-frequency = <50000000>; + no-1-8-v; + mrvl,clk-delay-cycles = <31>; + broken-cd; + status = "okay"; +}; + +&mmc2 { + clock-frequency = <50000000>; + no-1-8-v; + bus-width = <4>; + non-removable; + broken-cd; + wakeup-source; + keep-power-in-suspend; + mmc-pwrseq = <&wlan_pwrseq>; + vmmc-supply = <&wlan_reg>; + status = "okay"; +}; + +&mmc3 { + clock-frequency = <50000000>; + no-1-8-v; + bus-width = <8>; + non-removable; + broken-cd; + mrvl,clk-delay-cycles = <31>; + 
status = "okay"; +}; + +&twsi1 { + status = "okay"; + + audio-codec@1a { + compatible = "realtek,alc5631"; + reg = <0x1a>; + status = "okay"; + }; +}; + +&twsi2 { + status = "okay"; + + rtc@68 { + compatible = "dallas,ds1338"; + reg = <0x68>; + status = "okay"; + }; +}; + +&twsi6 { + status = "okay"; + + accelerometer@1d { + compatible = "st,lis331dlh", "st,lis3lv02d"; + reg = <0x1d>; + status = "okay"; + }; +}; + +&ssp3 { + #address-cells = <0>; + spi-slave; + status = "okay"; + ready-gpio = <&gpio 125 GPIO_ACTIVE_HIGH>; + + slave { + compatible = "olpc,xo1.75-ec"; + spi-cpha; + cmd-gpio = <&gpio 155 GPIO_ACTIVE_HIGH>; + }; +}; + +&camera0 { + status = "okay"; + + port { + camera0_0: endpoint { + remote-endpoint = <&ov7670_0>; + }; + }; +}; diff --git a/arch/arm/boot/dts/mmp2.dtsi b/arch/arm/boot/dts/mmp2.dtsi index b6f40743e07b0e..6a2f072c1d0a88 100644 --- a/arch/arm/boot/dts/mmp2.dtsi +++ b/arch/arm/boot/dts/mmp2.dtsi @@ -117,7 +117,7 @@ mrvl,intc-nr-irqs = <2>; }; - usb_otg_phy0: usb-otg-phy@d4207000 { + usb_phy0: usb-phy@d4207000 { compatible = "marvell,mmp2-usb-phy"; reg = <0xd4207000 0x40>; #phy-cells = <0>; @@ -130,7 +130,7 @@ interrupts = <44>; clocks = <&soc_clocks MMP2_CLK_USB>; clock-names = "USBCLK"; - phys = <&usb_otg_phy0>; + phys = <&usb_phy0>; phy-names = "usb"; status = "disabled"; }; @@ -170,6 +170,28 @@ interrupts = <54>; status = "disabled"; }; + + camera0: camera@d420a000 { + compatible = "marvell,mmp2-ccic"; + reg = <0xd420a000 0x800>; + interrupts = <42>; + clocks = <&soc_clocks MMP2_CLK_CCIC0>; + clock-names = "axi"; + #clock-cells = <0>; + clock-output-names = "mclk"; + status = "disabled"; + }; + + camera1: camera@d420a800 { + compatible = "marvell,mmp2-ccic"; + reg = <0xd420a800 0x800>; + interrupts = <30>; + clocks = <&soc_clocks MMP2_CLK_CCIC1>; + clock-names = "axi"; + #clock-cells = <0>; + clock-output-names = "mclk"; + status = "disabled"; + }; }; apb@d4000000 { /* APB */ @@ -192,6 +214,7 @@ interrupts = <27>; clocks = <&soc_clocks MMP2_CLK_UART0>; resets = <&soc_clocks MMP2_CLK_UART0>; + reg-shift = <2>; status = "disabled"; }; @@ -201,6 +224,7 @@ interrupts = <28>; clocks = <&soc_clocks MMP2_CLK_UART1>; resets = <&soc_clocks MMP2_CLK_UART1>; + reg-shift = <2>; status = "disabled"; }; @@ -210,6 +234,7 @@ interrupts = <24>; clocks = <&soc_clocks MMP2_CLK_UART2>; resets = <&soc_clocks MMP2_CLK_UART2>; + reg-shift = <2>; status = "disabled"; }; @@ -219,6 +244,7 @@ interrupts = <46>; clocks = <&soc_clocks MMP2_CLK_UART3>; resets = <&soc_clocks MMP2_CLK_UART3>; + reg-shift = <2>; status = "disabled"; }; @@ -346,40 +372,48 @@ status = "disabled"; }; - ssp1: ssp@d4035000 { + ssp1: spi@d4035000 { compatible = "marvell,mmp2-ssp"; reg = <0xd4035000 0x1000>; clocks = <&soc_clocks MMP2_CLK_SSP0>; interrupts = <0>; + #address-cells = <1>; + #size-cells = <0>; status = "disabled"; }; - ssp2: ssp@d4036000 { + ssp2: spi@d4036000 { compatible = "marvell,mmp2-ssp"; reg = <0xd4036000 0x1000>; clocks = <&soc_clocks MMP2_CLK_SSP1>; interrupts = <1>; + #address-cells = <1>; + #size-cells = <0>; status = "disabled"; }; - ssp3: ssp@d4037000 { + ssp3: spi@d4037000 { compatible = "marvell,mmp2-ssp"; reg = <0xd4037000 0x1000>; clocks = <&soc_clocks MMP2_CLK_SSP2>; interrupts = <20>; + #address-cells = <1>; + #size-cells = <0>; status = "disabled"; }; - ssp4: ssp@d4039000 { + ssp4: spi@d4039000 { compatible = "marvell,mmp2-ssp"; reg = <0xd4039000 0x1000>; clocks = <&soc_clocks MMP2_CLK_SSP3>; interrupts = <21>; + #address-cells = <1>; + #size-cells = <0>; status = "disabled"; }; }; - 
soc_clocks: clocks{ + soc_clocks: clocks { compatible = "marvell,mmp2-clock"; reg = <0xd4050000 0x1000>, <0xd4282800 0x400>, diff --git a/arch/arm/boot/dts/omap34xx.dtsi b/arch/arm/boot/dts/omap34xx.dtsi index f572a477f74ca2..7b09cbee8bb8b5 100644 --- a/arch/arm/boot/dts/omap34xx.dtsi +++ b/arch/arm/boot/dts/omap34xx.dtsi @@ -100,6 +100,32 @@ interrupts = <18>; }; }; + + /* + * On omap34xx the OCP registers do not seem to be accessible + * at all unlike on 36xx. Maybe SGX is permanently set to + * "OCP bypass mode", or maybe there is OCP_SYSCONFIG that is + * write-only at 0x50000e10. We detect SGX based on the SGX + * revision register instead of the unreadable OCP revision + * register. Also note that on early 34xx es1 revision there + * are also different clocks, but we do not have any dts users + * for it. + */ + sgx_module: target-module@50000000 { + compatible = "ti,sysc-omap2", "ti,sysc"; + reg = <0x50000014 0x4>; + reg-names = "rev"; + clocks = <&sgx_fck>, <&sgx_ick>; + clock-names = "fck", "ick"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x50000000 0x4000>; + + /* + * Closed source PowerVR driver, no child device + * binding or driver in mainline + */ + }; }; thermal_zones: thermal-zones { diff --git a/arch/arm/boot/dts/omap36xx.dtsi b/arch/arm/boot/dts/omap36xx.dtsi index 6fb23ada1f64de..1e552f08f120ed 100644 --- a/arch/arm/boot/dts/omap36xx.dtsi +++ b/arch/arm/boot/dts/omap36xx.dtsi @@ -139,6 +139,34 @@ interrupts = <18>; }; }; + + /* + * Note that the sysconfig register layout is a subset of the + * "ti,sysc-omap4" type register with just sidle and midle bits + * available while omap34xx has "ti,sysc-omap2" type sysconfig. + */ + sgx_module: target-module@50000000 { + compatible = "ti,sysc-omap4", "ti,sysc"; + reg = <0x5000fe00 0x4>, + <0x5000fe10 0x4>; + reg-names = "rev", "sysc"; + ti,sysc-midle = , + , + ; + ti,sysc-sidle = , + , + ; + clocks = <&sgx_fck>, <&sgx_ick>; + clock-names = "fck", "ick"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x50000000 0x2000000>; + + /* + * Closed source PowerVR driver, no child device + * binding or driver in mainline + */ + }; }; thermal_zones: thermal-zones { diff --git a/arch/arm/boot/dts/omap4-l4-abe.dtsi b/arch/arm/boot/dts/omap4-l4-abe.dtsi index 67072df39bc745..8e6662bb9e837f 100644 --- a/arch/arm/boot/dts/omap4-l4-abe.dtsi +++ b/arch/arm/boot/dts/omap4-l4-abe.dtsi @@ -255,7 +255,6 @@ target-module@30000 { /* 0x40130000, ap 14 0e.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "wd_timer3"; reg = <0x30000 0x4>, <0x30010 0x4>, <0x30014 0x4>; diff --git a/arch/arm/boot/dts/omap4-l4.dtsi b/arch/arm/boot/dts/omap4-l4.dtsi index bea05dc4ef0f96..d60d5e0ecc4c14 100644 --- a/arch/arm/boot/dts/omap4-l4.dtsi +++ b/arch/arm/boot/dts/omap4-l4.dtsi @@ -456,17 +456,43 @@ }; }; + /* d2d mdm */ target-module@36000 { /* 0x4a0b6000, ap 69 60.0 */ - compatible = "ti,sysc"; - status = "disabled"; + compatible = "ti,sysc-omap2", "ti,sysc"; + reg = <0x36000 0x4>, + <0x36010 0x4>, + <0x36014 0x4>; + reg-names = "rev", "sysc", "syss"; + ti,sysc-mask = <(SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE)>; + ti,sysc-sidle = , + , + , + ; + ti,syss-mask = <1>; + /* Domains (V, P, C): core, core_pwrdm, d2d_clkdm */ + clocks = <&d2d_clkctrl OMAP4_C2C_CLKCTRL 0>; + clock-names = "fck"; #address-cells = <1>; #size-cells = <1>; ranges = <0x0 0x36000 0x1000>; }; + /* d2d mpu */ target-module@4d000 { /* 0x4a0cd000, ap 78 58.0 */ - compatible = "ti,sysc"; - status = "disabled"; + compatible = "ti,sysc-omap2", "ti,sysc"; + reg = 
<0x4d000 0x4>, + <0x4d010 0x4>, + <0x4d014 0x4>; + reg-names = "rev", "sysc", "syss"; + ti,sysc-mask = <(SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE)>; + ti,sysc-sidle = , + , + , + ; + ti,syss-mask = <1>; + /* Domains (V, P, C): core, core_pwrdm, d2d_clkdm */ + clocks = <&d2d_clkctrl OMAP4_C2C_CLKCTRL 0>; + clock-names = "fck"; #address-cells = <1>; #size-cells = <1>; ranges = <0x0 0x4d000 0x1000>; @@ -1094,7 +1120,6 @@ target-module@4000 { /* 0x4a314000, ap 7 18.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "wd_timer2"; reg = <0x4000 0x4>, <0x4010 0x4>, <0x4014 0x4>; @@ -1695,7 +1720,6 @@ target-module@60000 { /* 0x48060000, ap 25 1e.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "i2c3"; reg = <0x60000 0x8>, <0x60010 0x8>, <0x60090 0x8>; @@ -1814,7 +1838,6 @@ target-module@70000 { /* 0x48070000, ap 32 28.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "i2c1"; reg = <0x70000 0x8>, <0x70010 0x8>, <0x70090 0x8>; @@ -1846,7 +1869,6 @@ target-module@72000 { /* 0x48072000, ap 34 30.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "i2c2"; reg = <0x72000 0x8>, <0x72010 0x8>, <0x72090 0x8>; @@ -2401,7 +2423,6 @@ target-module@150000 { /* 0x48350000, ap 77 4c.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; - ti,hwmods = "i2c4"; reg = <0x150000 0x8>, <0x150010 0x8>, <0x150090 0x8>; diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi index c43e52fd5f65cf..7cc95bc1598b6f 100644 --- a/arch/arm/boot/dts/omap4.dtsi +++ b/arch/arm/boot/dts/omap4.dtsi @@ -330,7 +330,6 @@ target-module@56000000 { compatible = "ti,sysc-omap4", "ti,sysc"; - ti,hwmods = "gpu"; reg = <0x5601fc00 0x4>, <0x5601fc10 0x4>; reg-names = "rev", "sysc"; diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi index edfd26c0346218..1fb7937638f078 100644 --- a/arch/arm/boot/dts/omap5.dtsi +++ b/arch/arm/boot/dts/omap5.dtsi @@ -257,6 +257,29 @@ ports-implemented = <0x1>; }; + target-module@56000000 { + compatible = "ti,sysc-omap4", "ti,sysc"; + reg = <0x5600fe00 0x4>, + <0x5600fe10 0x4>; + reg-names = "rev", "sysc"; + ti,sysc-midle = , + , + ; + ti,sysc-sidle = , + , + ; + clocks = <&gpu_clkctrl OMAP5_GPU_CLKCTRL 0>; + clock-names = "fck"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x56000000 0x2000000>; + + /* + * Closed source PowerVR driver, no child device + * binding or driver in mainline + */ + }; + dss: dss@58000000 { compatible = "ti,omap5-dss"; reg = <0x58000000 0x80>; diff --git a/arch/arm/boot/dts/omap54xx-clocks.dtsi b/arch/arm/boot/dts/omap54xx-clocks.dtsi index 33e8dd905bff7d..fac2e57dcca905 100644 --- a/arch/arm/boot/dts/omap54xx-clocks.dtsi +++ b/arch/arm/boot/dts/omap54xx-clocks.dtsi @@ -1146,6 +1146,20 @@ }; }; + gpu_cm: clock-controller@1500 { + compatible = "ti,omap4-cm"; + reg = <0x1500 0x100>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x1500 0x100>; + + gpu_clkctrl: clk@20 { + compatible = "ti,clkctrl"; + reg = <0x20 0x4>; + #clock-cells = <2>; + }; + }; + l3init_cm: l3init_cm@1600 { compatible = "ti,omap4-cm"; reg = <0x1600 0x100>; diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile index 531e59f5be9c8f..b76b75bd9e0051 100644 --- a/arch/arm/kvm/Makefile +++ b/arch/arm/kvm/Makefile @@ -8,13 +8,14 @@ ifeq ($(plus_virt),+virt) plus_virt_def := -DREQUIRES_VIRT=1 endif +KVM := ../../../virt/kvm + ccflags-y += -I $(srctree)/$(src) -I $(srctree)/virt/kvm/arm/vgic -CFLAGS_arm.o := $(plus_virt_def) +CFLAGS_$(KVM)/arm/arm.o := $(plus_virt_def) AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt) 
AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt) -KVM := ../../../virt/kvm kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o obj-$(CONFIG_KVM_ARM_HOST) += hyp/ diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig index f83786640f9420..9dab1f50a02f83 100644 --- a/arch/arm/mach-exynos/Kconfig +++ b/arch/arm/mach-exynos/Kconfig @@ -19,6 +19,7 @@ menuconfig ARCH_EXYNOS select EXYNOS_SROM select EXYNOS_PM_DOMAINS if PM_GENERIC_DOMAINS select GPIOLIB + select HAVE_ARM_ARCH_TIMER if ARCH_EXYNOS5 && VIRTUALIZATION select HAVE_ARM_SCU if SMP select HAVE_S3C2410_I2C if I2C select HAVE_S3C2410_WATCHDOG if WATCHDOG diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h index aaa6092426ea2b..3de3d7a115b3a6 100644 --- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h +++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h @@ -30,7 +30,6 @@ extern struct omap_hwmod_ocp_if am33xx_l3_main__gfx; extern struct omap_hwmod_ocp_if am33xx_l4_wkup__rtc; extern struct omap_hwmod_ocp_if am33xx_l4_per__dcan0; extern struct omap_hwmod_ocp_if am33xx_l4_per__dcan1; -extern struct omap_hwmod_ocp_if am33xx_cpgmac0__mdio; extern struct omap_hwmod_ocp_if am33xx_l4_ls__elm; extern struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss0; extern struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss1; @@ -72,8 +71,6 @@ extern struct omap_hwmod am33xx_rng_hwmod; extern struct omap_hwmod am33xx_ocmcram_hwmod; extern struct omap_hwmod am33xx_smartreflex0_hwmod; extern struct omap_hwmod am33xx_smartreflex1_hwmod; -extern struct omap_hwmod am33xx_cpgmac0_hwmod; -extern struct omap_hwmod am33xx_mdio_hwmod; extern struct omap_hwmod am33xx_dcan0_hwmod; extern struct omap_hwmod am33xx_dcan1_hwmod; extern struct omap_hwmod am33xx_elm_hwmod; diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c index 47a0e301b193f5..63698ffa6d27d7 100644 --- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c @@ -122,12 +122,6 @@ struct omap_hwmod_ocp_if am33xx_l4_per__dcan1 = { .user = OCP_USER_MPU | OCP_USER_SDMA, }; -struct omap_hwmod_ocp_if am33xx_cpgmac0__mdio = { - .master = &am33xx_cpgmac0_hwmod, - .slave = &am33xx_mdio_hwmod, - .user = OCP_USER_MPU, -}; - struct omap_hwmod_ocp_if am33xx_l4_ls__elm = { .master = &am33xx_l4_ls_hwmod, .slave = &am33xx_elm_hwmod, diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c index adb6271f819be0..dd939e1325c64a 100644 --- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c @@ -349,54 +349,6 @@ struct omap_hwmod_class am33xx_control_hwmod_class = { .name = "control", }; -/* - * 'cpgmac' class - * cpsw/cpgmac sub system - */ -static struct omap_hwmod_class_sysconfig am33xx_cpgmac_sysc = { - .rev_offs = 0x0, - .sysc_offs = 0x8, - .syss_offs = 0x4, - .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE | - SYSS_HAS_RESET_STATUS), - .idlemodes = (SIDLE_FORCE | SIDLE_NO | MSTANDBY_FORCE | - MSTANDBY_NO), - .sysc_fields = &omap_hwmod_sysc_type3, -}; - -static struct omap_hwmod_class am33xx_cpgmac0_hwmod_class = { - .name = "cpgmac0", - .sysc = &am33xx_cpgmac_sysc, -}; - -struct omap_hwmod am33xx_cpgmac0_hwmod = { - .name = "cpgmac0", - .class = &am33xx_cpgmac0_hwmod_class, - .clkdm_name = 
"cpsw_125mhz_clkdm", - .flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY), - .main_clk = "cpsw_125mhz_gclk", - .mpu_rt_idx = 1, - .prcm = { - .omap4 = { - .modulemode = MODULEMODE_SWCTRL, - }, - }, -}; - -/* - * mdio class - */ -static struct omap_hwmod_class am33xx_mdio_hwmod_class = { - .name = "davinci_mdio", -}; - -struct omap_hwmod am33xx_mdio_hwmod = { - .name = "davinci_mdio", - .class = &am33xx_mdio_hwmod_class, - .clkdm_name = "cpsw_125mhz_clkdm", - .main_clk = "cpsw_125mhz_gclk", -}; - /* * dcan class */ @@ -1072,7 +1024,6 @@ static void omap_hwmod_am33xx_clkctrl(void) CLKCTRL(am33xx_tptc1_hwmod, AM33XX_CM_PER_TPTC1_CLKCTRL_OFFSET); CLKCTRL(am33xx_tptc2_hwmod, AM33XX_CM_PER_TPTC2_CLKCTRL_OFFSET); CLKCTRL(am33xx_gfx_hwmod, AM33XX_CM_GFX_GFX_CLKCTRL_OFFSET); - CLKCTRL(am33xx_cpgmac0_hwmod, AM33XX_CM_PER_CPGMAC0_CLKCTRL_OFFSET); CLKCTRL(am33xx_pruss_hwmod, AM33XX_CM_PER_PRUSS_CLKCTRL_OFFSET); CLKCTRL(am33xx_mpu_hwmod , AM33XX_CM_MPU_MPU_CLKCTRL_OFFSET); CLKCTRL(am33xx_l3_instr_hwmod , AM33XX_CM_PER_L3_INSTR_CLKCTRL_OFFSET); @@ -1134,7 +1085,6 @@ static void omap_hwmod_am43xx_clkctrl(void) CLKCTRL(am33xx_tptc1_hwmod, AM43XX_CM_PER_TPTC1_CLKCTRL_OFFSET); CLKCTRL(am33xx_tptc2_hwmod, AM43XX_CM_PER_TPTC2_CLKCTRL_OFFSET); CLKCTRL(am33xx_gfx_hwmod, AM43XX_CM_GFX_GFX_CLKCTRL_OFFSET); - CLKCTRL(am33xx_cpgmac0_hwmod, AM43XX_CM_PER_CPGMAC0_CLKCTRL_OFFSET); CLKCTRL(am33xx_pruss_hwmod, AM43XX_CM_PER_PRUSS_CLKCTRL_OFFSET); CLKCTRL(am33xx_mpu_hwmod , AM43XX_CM_MPU_MPU_CLKCTRL_OFFSET); CLKCTRL(am33xx_l3_instr_hwmod , AM43XX_CM_PER_L3_INSTR_CLKCTRL_OFFSET); diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c index c965af275e3414..2bcb6345b8735b 100644 --- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c @@ -372,13 +372,6 @@ static struct omap_hwmod_ocp_if am33xx_l4_wkup__adc_tsc = { .user = OCP_USER_MPU, }; -static struct omap_hwmod_ocp_if am33xx_l4_hs__cpgmac0 = { - .master = &am33xx_l4_hs_hwmod, - .slave = &am33xx_cpgmac0_hwmod, - .clk = "cpsw_125mhz_gclk", - .user = OCP_USER_MPU, -}; - static struct omap_hwmod_ocp_if am33xx_l3_main__lcdc = { .master = &am33xx_l3_main_hwmod, .slave = &am33xx_lcdc_hwmod, @@ -462,8 +455,6 @@ static struct omap_hwmod_ocp_if *am33xx_hwmod_ocp_ifs[] __initdata = { &am33xx_l3_main__tptc2, &am33xx_l3_main__ocmc, &am33xx_l3_s__usbss, - &am33xx_l4_hs__cpgmac0, - &am33xx_cpgmac0__mdio, &am33xx_l3_main__sha0, &am33xx_l3_main__aes0, &am33xx_l4_per__rng, diff --git a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c index 69571abc14fd06..5c3db6b6438bc3 100644 --- a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c @@ -597,13 +597,6 @@ static struct omap_hwmod_ocp_if am43xx_l4_wkup__adc_tsc = { .user = OCP_USER_MPU, }; -static struct omap_hwmod_ocp_if am43xx_l4_hs__cpgmac0 = { - .master = &am43xx_l4_hs_hwmod, - .slave = &am33xx_cpgmac0_hwmod, - .clk = "cpsw_125mhz_gclk", - .user = OCP_USER_MPU, -}; - static struct omap_hwmod_ocp_if am43xx_l4_wkup__timer1 = { .master = &am33xx_l4_wkup_hwmod, .slave = &am33xx_timer1_hwmod, @@ -859,8 +852,6 @@ static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = { &am33xx_l3_main__tptc1, &am33xx_l3_main__tptc2, &am33xx_l3_main__ocmc, - &am43xx_l4_hs__cpgmac0, - &am33xx_cpgmac0__mdio, &am33xx_l3_main__sha0, &am33xx_l3_main__aes0, &am43xx_l3_main__des, diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 
a6f2a10cdc3e47..28ea2960a9b28c 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -28,7 +28,6 @@ #include "cm2_44xx.h" #include "prm44xx.h" #include "prm-regbits-44xx.h" -#include "wd_timer.h" /* Base offset for all OMAP4 interrupts external to MPUSS */ #define OMAP44XX_IRQ_GIC_START 32 @@ -275,29 +274,6 @@ static struct omap_hwmod omap44xx_aess_hwmod = { }, }; -/* - * 'c2c' class - * chip 2 chip interface used to plug the ape soc (omap) with an external modem - * soc - */ - -static struct omap_hwmod_class omap44xx_c2c_hwmod_class = { - .name = "c2c", -}; - -/* c2c */ -static struct omap_hwmod omap44xx_c2c_hwmod = { - .name = "c2c", - .class = &omap44xx_c2c_hwmod_class, - .clkdm_name = "d2d_clkdm", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP4_CM_D2D_SAD2D_CLKCTRL_OFFSET, - .context_offs = OMAP4_RM_D2D_SAD2D_CONTEXT_OFFSET, - }, - }, -}; - /* * 'counter' class * 32-bit ordinary counter, clocked by the falling edge of the 32 khz clock @@ -1085,41 +1061,6 @@ static struct omap_hwmod omap44xx_gpmc_hwmod = { }, }; -/* - * 'gpu' class - * 2d/3d graphics accelerator - */ - -static struct omap_hwmod_class_sysconfig omap44xx_gpu_sysc = { - .rev_offs = 0x1fc00, - .sysc_offs = 0x1fc10, - .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE), - .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | - SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO | - MSTANDBY_SMART | MSTANDBY_SMART_WKUP), - .sysc_fields = &omap_hwmod_sysc_type2, -}; - -static struct omap_hwmod_class omap44xx_gpu_hwmod_class = { - .name = "gpu", - .sysc = &omap44xx_gpu_sysc, -}; - -/* gpu */ -static struct omap_hwmod omap44xx_gpu_hwmod = { - .name = "gpu", - .class = &omap44xx_gpu_hwmod_class, - .clkdm_name = "l3_gfx_clkdm", - .main_clk = "sgx_clk_mux", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP4_CM_GFX_GFX_CLKCTRL_OFFSET, - .context_offs = OMAP4_RM_GFX_GFX_CONTEXT_OFFSET, - .modulemode = MODULEMODE_SWCTRL, - }, - }, -}; - /* * 'hdq1w' class * hdq / 1-wire serial interface controller @@ -2433,61 +2374,6 @@ static struct omap_hwmod omap44xx_usb_tll_hs_hwmod = { }, }; -/* - * 'wd_timer' class - * 32-bit watchdog upward counter that generates a pulse on the reset pin on - * overflow condition - */ - -static struct omap_hwmod_class_sysconfig omap44xx_wd_timer_sysc = { - .rev_offs = 0x0000, - .sysc_offs = 0x0010, - .syss_offs = 0x0014, - .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_SIDLEMODE | - SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), - .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | - SIDLE_SMART_WKUP), - .sysc_fields = &omap_hwmod_sysc_type1, -}; - -static struct omap_hwmod_class omap44xx_wd_timer_hwmod_class = { - .name = "wd_timer", - .sysc = &omap44xx_wd_timer_sysc, - .pre_shutdown = &omap2_wd_timer_disable, - .reset = &omap2_wd_timer_reset, -}; - -/* wd_timer2 */ -static struct omap_hwmod omap44xx_wd_timer2_hwmod = { - .name = "wd_timer2", - .class = &omap44xx_wd_timer_hwmod_class, - .clkdm_name = "l4_wkup_clkdm", - .main_clk = "sys_32k_ck", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP4_CM_WKUP_WDT2_CLKCTRL_OFFSET, - .context_offs = OMAP4_RM_WKUP_WDT2_CONTEXT_OFFSET, - .modulemode = MODULEMODE_SWCTRL, - }, - }, -}; - -/* wd_timer3 */ -static struct omap_hwmod omap44xx_wd_timer3_hwmod = { - .name = "wd_timer3", - .class = &omap44xx_wd_timer_hwmod_class, - .clkdm_name = "abe_clkdm", - .main_clk = "sys_32k_ck", - .prcm = { - .omap4 = { - .clkctrl_offs = OMAP4_CM1_ABE_WDT3_CLKCTRL_OFFSET, - .context_offs = OMAP4_RM_ABE_WDT3_CONTEXT_OFFSET, - .modulemode = 
MODULEMODE_SWCTRL, - }, - }, -}; - - /* * interfaces */ @@ -2596,14 +2482,6 @@ static struct omap_hwmod_ocp_if omap44xx_fdif__l3_main_2 = { .user = OCP_USER_MPU | OCP_USER_SDMA, }; -/* gpu -> l3_main_2 */ -static struct omap_hwmod_ocp_if omap44xx_gpu__l3_main_2 = { - .master = &omap44xx_gpu_hwmod, - .slave = &omap44xx_l3_main_2_hwmod, - .clk = "l3_div_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - /* hsi -> l3_main_2 */ static struct omap_hwmod_ocp_if omap44xx_hsi__l3_main_2 = { .master = &omap44xx_hsi_hwmod, @@ -2788,14 +2666,6 @@ static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_abe__aess_dma = { .user = OCP_USER_SDMA, }; -/* l3_main_2 -> c2c */ -static struct omap_hwmod_ocp_if omap44xx_l3_main_2__c2c = { - .master = &omap44xx_l3_main_2_hwmod, - .slave = &omap44xx_c2c_hwmod, - .clk = "l3_div_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - /* l4_wkup -> counter_32k */ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__counter_32k = { .master = &omap44xx_l4_wkup_hwmod, @@ -3028,14 +2898,6 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__gpmc = { .user = OCP_USER_MPU | OCP_USER_SDMA, }; -/* l3_main_2 -> gpu */ -static struct omap_hwmod_ocp_if omap44xx_l3_main_2__gpu = { - .master = &omap44xx_l3_main_2_hwmod, - .slave = &omap44xx_gpu_hwmod, - .clk = "l3_div_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - /* l4_per -> hdq1w */ static struct omap_hwmod_ocp_if omap44xx_l4_per__hdq1w = { .master = &omap44xx_l4_per_hwmod, @@ -3396,30 +3258,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_tll_hs = { .user = OCP_USER_MPU | OCP_USER_SDMA, }; -/* l4_wkup -> wd_timer2 */ -static struct omap_hwmod_ocp_if omap44xx_l4_wkup__wd_timer2 = { - .master = &omap44xx_l4_wkup_hwmod, - .slave = &omap44xx_wd_timer2_hwmod, - .clk = "l4_wkup_clk_mux_ck", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_abe -> wd_timer3 */ -static struct omap_hwmod_ocp_if omap44xx_l4_abe__wd_timer3 = { - .master = &omap44xx_l4_abe_hwmod, - .slave = &omap44xx_wd_timer3_hwmod, - .clk = "ocp_abe_iclk", - .user = OCP_USER_MPU, -}; - -/* l4_abe -> wd_timer3 (dma) */ -static struct omap_hwmod_ocp_if omap44xx_l4_abe__wd_timer3_dma = { - .master = &omap44xx_l4_abe_hwmod, - .slave = &omap44xx_wd_timer3_hwmod, - .clk = "ocp_abe_iclk", - .user = OCP_USER_SDMA, -}; - /* mpu -> emif1 */ static struct omap_hwmod_ocp_if omap44xx_mpu__emif1 = { .master = &omap44xx_mpu_hwmod, @@ -3450,7 +3288,6 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = { &omap44xx_debugss__l3_main_2, &omap44xx_dma_system__l3_main_2, &omap44xx_fdif__l3_main_2, - &omap44xx_gpu__l3_main_2, &omap44xx_hsi__l3_main_2, &omap44xx_ipu__l3_main_2, &omap44xx_iss__l3_main_2, @@ -3474,7 +3311,6 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = { &omap44xx_l4_cfg__ocp_wp_noc, &omap44xx_l4_abe__aess, &omap44xx_l4_abe__aess_dma, - &omap44xx_l3_main_2__c2c, &omap44xx_l4_wkup__counter_32k, &omap44xx_l4_cfg__ctrl_module_core, &omap44xx_l4_cfg__ctrl_module_pad_core, @@ -3503,7 +3339,6 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = { &omap44xx_l4_per__elm, &omap44xx_l4_cfg__fdif, &omap44xx_l3_main_2__gpmc, - &omap44xx_l3_main_2__gpu, &omap44xx_l4_per__hdq1w, &omap44xx_l4_cfg__hsi, &omap44xx_l3_main_2__ipu, @@ -3551,9 +3386,6 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = { &omap44xx_l4_cfg__usb_host_hs, &omap44xx_l4_cfg__usb_otg_hs, &omap44xx_l4_cfg__usb_tll_hs, - &omap44xx_l4_wkup__wd_timer2, - &omap44xx_l4_abe__wd_timer3, - &omap44xx_l4_abe__wd_timer3_dma, 
&omap44xx_mpu__emif1, &omap44xx_mpu__emif2, &omap44xx_l3_main_2__aes1, diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index 1ec21e9ba1e99a..e5bd549d2a5e7f 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c @@ -284,56 +284,6 @@ static struct omap_hwmod dra7xx_ctrl_module_wkup_hwmod = { }, }; -/* - * 'gmac' class - * cpsw/gmac sub system - */ -static struct omap_hwmod_class_sysconfig dra7xx_gmac_sysc = { - .rev_offs = 0x0, - .sysc_offs = 0x8, - .syss_offs = 0x4, - .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE | - SYSS_HAS_RESET_STATUS), - .idlemodes = (SIDLE_FORCE | SIDLE_NO | MSTANDBY_FORCE | - MSTANDBY_NO), - .sysc_fields = &omap_hwmod_sysc_type3, -}; - -static struct omap_hwmod_class dra7xx_gmac_hwmod_class = { - .name = "gmac", - .sysc = &dra7xx_gmac_sysc, -}; - -static struct omap_hwmod dra7xx_gmac_hwmod = { - .name = "gmac", - .class = &dra7xx_gmac_hwmod_class, - .clkdm_name = "gmac_clkdm", - .flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY), - .main_clk = "dpll_gmac_ck", - .mpu_rt_idx = 1, - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_GMAC_GMAC_CLKCTRL_OFFSET, - .context_offs = DRA7XX_RM_GMAC_GMAC_CONTEXT_OFFSET, - .modulemode = MODULEMODE_SWCTRL, - }, - }, -}; - -/* - * 'mdio' class - */ -static struct omap_hwmod_class dra7xx_mdio_hwmod_class = { - .name = "davinci_mdio", -}; - -static struct omap_hwmod dra7xx_mdio_hwmod = { - .name = "davinci_mdio", - .class = &dra7xx_mdio_hwmod_class, - .clkdm_name = "gmac_clkdm", - .main_clk = "dpll_gmac_ck", -}; - /* * 'dcan' class * @@ -1046,281 +996,6 @@ static struct omap_hwmod dra7xx_mailbox13_hwmod = { }, }; -/* - * 'mcspi' class - * - */ - -static struct omap_hwmod_class_sysconfig dra7xx_mcspi_sysc = { - .rev_offs = 0x0000, - .sysc_offs = 0x0010, - .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS | - SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), - .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | - SIDLE_SMART_WKUP), - .sysc_fields = &omap_hwmod_sysc_type2, -}; - -static struct omap_hwmod_class dra7xx_mcspi_hwmod_class = { - .name = "mcspi", - .sysc = &dra7xx_mcspi_sysc, -}; - -/* mcspi1 */ -static struct omap_hwmod dra7xx_mcspi1_hwmod = { - .name = "mcspi1", - .class = &dra7xx_mcspi_hwmod_class, - .clkdm_name = "l4per_clkdm", - .main_clk = "func_48m_fclk", - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_L4PER_MCSPI1_CLKCTRL_OFFSET, - .context_offs = DRA7XX_RM_L4PER_MCSPI1_CONTEXT_OFFSET, - .modulemode = MODULEMODE_SWCTRL, - }, - }, -}; - -/* mcspi2 */ -static struct omap_hwmod dra7xx_mcspi2_hwmod = { - .name = "mcspi2", - .class = &dra7xx_mcspi_hwmod_class, - .clkdm_name = "l4per_clkdm", - .main_clk = "func_48m_fclk", - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_L4PER_MCSPI2_CLKCTRL_OFFSET, - .context_offs = DRA7XX_RM_L4PER_MCSPI2_CONTEXT_OFFSET, - .modulemode = MODULEMODE_SWCTRL, - }, - }, -}; - -/* mcspi3 */ -static struct omap_hwmod dra7xx_mcspi3_hwmod = { - .name = "mcspi3", - .class = &dra7xx_mcspi_hwmod_class, - .clkdm_name = "l4per_clkdm", - .main_clk = "func_48m_fclk", - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_L4PER_MCSPI3_CLKCTRL_OFFSET, - .context_offs = DRA7XX_RM_L4PER_MCSPI3_CONTEXT_OFFSET, - .modulemode = MODULEMODE_SWCTRL, - }, - }, -}; - -/* mcspi4 */ -static struct omap_hwmod dra7xx_mcspi4_hwmod = { - .name = "mcspi4", - .class = &dra7xx_mcspi_hwmod_class, - .clkdm_name = "l4per_clkdm", - .main_clk = "func_48m_fclk", - .prcm = { - .omap4 = { - .clkctrl_offs = 
DRA7XX_CM_L4PER_MCSPI4_CLKCTRL_OFFSET, - .context_offs = DRA7XX_RM_L4PER_MCSPI4_CONTEXT_OFFSET, - .modulemode = MODULEMODE_SWCTRL, - }, - }, -}; - -/* - * 'mcasp' class - * - */ -static struct omap_hwmod_class_sysconfig dra7xx_mcasp_sysc = { - .rev_offs = 0, - .sysc_offs = 0x0004, - .sysc_flags = SYSC_HAS_SIDLEMODE, - .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), - .sysc_fields = &omap_hwmod_sysc_type3, -}; - -static struct omap_hwmod_class dra7xx_mcasp_hwmod_class = { - .name = "mcasp", - .sysc = &dra7xx_mcasp_sysc, -}; - -/* mcasp1 */ -static struct omap_hwmod_opt_clk mcasp1_opt_clks[] = { - { .role = "ahclkx", .clk = "mcasp1_ahclkx_mux" }, - { .role = "ahclkr", .clk = "mcasp1_ahclkr_mux" }, -}; - -static struct omap_hwmod dra7xx_mcasp1_hwmod = { - .name = "mcasp1", - .class = &dra7xx_mcasp_hwmod_class, - .clkdm_name = "ipu_clkdm", - .main_clk = "mcasp1_aux_gfclk_mux", - .flags = HWMOD_OPT_CLKS_NEEDED, - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_IPU_MCASP1_CLKCTRL_OFFSET, - .context_offs = DRA7XX_RM_IPU_MCASP1_CONTEXT_OFFSET, - .modulemode = MODULEMODE_SWCTRL, - }, - }, - .opt_clks = mcasp1_opt_clks, - .opt_clks_cnt = ARRAY_SIZE(mcasp1_opt_clks), -}; - -/* mcasp2 */ -static struct omap_hwmod_opt_clk mcasp2_opt_clks[] = { - { .role = "ahclkx", .clk = "mcasp2_ahclkx_mux" }, - { .role = "ahclkr", .clk = "mcasp2_ahclkr_mux" }, -}; - -static struct omap_hwmod dra7xx_mcasp2_hwmod = { - .name = "mcasp2", - .class = &dra7xx_mcasp_hwmod_class, - .clkdm_name = "l4per2_clkdm", - .main_clk = "mcasp2_aux_gfclk_mux", - .flags = HWMOD_OPT_CLKS_NEEDED, - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_L4PER2_MCASP2_CLKCTRL_OFFSET, - .context_offs = DRA7XX_RM_L4PER2_MCASP2_CONTEXT_OFFSET, - .modulemode = MODULEMODE_SWCTRL, - }, - }, - .opt_clks = mcasp2_opt_clks, - .opt_clks_cnt = ARRAY_SIZE(mcasp2_opt_clks), -}; - -/* mcasp3 */ -static struct omap_hwmod_opt_clk mcasp3_opt_clks[] = { - { .role = "ahclkx", .clk = "mcasp3_ahclkx_mux" }, -}; - -static struct omap_hwmod dra7xx_mcasp3_hwmod = { - .name = "mcasp3", - .class = &dra7xx_mcasp_hwmod_class, - .clkdm_name = "l4per2_clkdm", - .main_clk = "mcasp3_aux_gfclk_mux", - .flags = HWMOD_OPT_CLKS_NEEDED, - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_L4PER2_MCASP3_CLKCTRL_OFFSET, - .context_offs = DRA7XX_RM_L4PER2_MCASP3_CONTEXT_OFFSET, - .modulemode = MODULEMODE_SWCTRL, - }, - }, - .opt_clks = mcasp3_opt_clks, - .opt_clks_cnt = ARRAY_SIZE(mcasp3_opt_clks), -}; - -/* mcasp4 */ -static struct omap_hwmod_opt_clk mcasp4_opt_clks[] = { - { .role = "ahclkx", .clk = "mcasp4_ahclkx_mux" }, -}; - -static struct omap_hwmod dra7xx_mcasp4_hwmod = { - .name = "mcasp4", - .class = &dra7xx_mcasp_hwmod_class, - .clkdm_name = "l4per2_clkdm", - .main_clk = "mcasp4_aux_gfclk_mux", - .flags = HWMOD_OPT_CLKS_NEEDED, - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_L4PER2_MCASP4_CLKCTRL_OFFSET, - .context_offs = DRA7XX_RM_L4PER2_MCASP4_CONTEXT_OFFSET, - .modulemode = MODULEMODE_SWCTRL, - }, - }, - .opt_clks = mcasp4_opt_clks, - .opt_clks_cnt = ARRAY_SIZE(mcasp4_opt_clks), -}; - -/* mcasp5 */ -static struct omap_hwmod_opt_clk mcasp5_opt_clks[] = { - { .role = "ahclkx", .clk = "mcasp5_ahclkx_mux" }, -}; - -static struct omap_hwmod dra7xx_mcasp5_hwmod = { - .name = "mcasp5", - .class = &dra7xx_mcasp_hwmod_class, - .clkdm_name = "l4per2_clkdm", - .main_clk = "mcasp5_aux_gfclk_mux", - .flags = HWMOD_OPT_CLKS_NEEDED, - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_L4PER2_MCASP5_CLKCTRL_OFFSET, - .context_offs = DRA7XX_RM_L4PER2_MCASP5_CONTEXT_OFFSET, - 
.modulemode = MODULEMODE_SWCTRL, - }, - }, - .opt_clks = mcasp5_opt_clks, - .opt_clks_cnt = ARRAY_SIZE(mcasp5_opt_clks), -}; - -/* mcasp6 */ -static struct omap_hwmod_opt_clk mcasp6_opt_clks[] = { - { .role = "ahclkx", .clk = "mcasp6_ahclkx_mux" }, -}; - -static struct omap_hwmod dra7xx_mcasp6_hwmod = { - .name = "mcasp6", - .class = &dra7xx_mcasp_hwmod_class, - .clkdm_name = "l4per2_clkdm", - .main_clk = "mcasp6_aux_gfclk_mux", - .flags = HWMOD_OPT_CLKS_NEEDED, - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_L4PER2_MCASP6_CLKCTRL_OFFSET, - .context_offs = DRA7XX_RM_L4PER2_MCASP6_CONTEXT_OFFSET, - .modulemode = MODULEMODE_SWCTRL, - }, - }, - .opt_clks = mcasp6_opt_clks, - .opt_clks_cnt = ARRAY_SIZE(mcasp6_opt_clks), -}; - -/* mcasp7 */ -static struct omap_hwmod_opt_clk mcasp7_opt_clks[] = { - { .role = "ahclkx", .clk = "mcasp7_ahclkx_mux" }, -}; - -static struct omap_hwmod dra7xx_mcasp7_hwmod = { - .name = "mcasp7", - .class = &dra7xx_mcasp_hwmod_class, - .clkdm_name = "l4per2_clkdm", - .main_clk = "mcasp7_aux_gfclk_mux", - .flags = HWMOD_OPT_CLKS_NEEDED, - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_L4PER2_MCASP7_CLKCTRL_OFFSET, - .context_offs = DRA7XX_RM_L4PER2_MCASP7_CONTEXT_OFFSET, - .modulemode = MODULEMODE_SWCTRL, - }, - }, - .opt_clks = mcasp7_opt_clks, - .opt_clks_cnt = ARRAY_SIZE(mcasp7_opt_clks), -}; - -/* mcasp8 */ -static struct omap_hwmod_opt_clk mcasp8_opt_clks[] = { - { .role = "ahclkx", .clk = "mcasp8_ahclkx_mux" }, -}; - -static struct omap_hwmod dra7xx_mcasp8_hwmod = { - .name = "mcasp8", - .class = &dra7xx_mcasp_hwmod_class, - .clkdm_name = "l4per2_clkdm", - .main_clk = "mcasp8_aux_gfclk_mux", - .flags = HWMOD_OPT_CLKS_NEEDED, - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_L4PER2_MCASP8_CLKCTRL_OFFSET, - .context_offs = DRA7XX_RM_L4PER2_MCASP8_CONTEXT_OFFSET, - .modulemode = MODULEMODE_SWCTRL, - }, - }, - .opt_clks = mcasp8_opt_clks, - .opt_clks_cnt = ARRAY_SIZE(mcasp8_opt_clks), -}; - /* * 'mpu' class * @@ -2303,19 +1978,6 @@ static struct omap_hwmod_ocp_if dra7xx_l4_wkup__ctrl_module_wkup = { .user = OCP_USER_MPU | OCP_USER_SDMA, }; -static struct omap_hwmod_ocp_if dra7xx_l4_per2__cpgmac0 = { - .master = &dra7xx_l4_per2_hwmod, - .slave = &dra7xx_gmac_hwmod, - .clk = "dpll_gmac_ck", - .user = OCP_USER_MPU, -}; - -static struct omap_hwmod_ocp_if dra7xx_gmac__mdio = { - .master = &dra7xx_gmac_hwmod, - .slave = &dra7xx_mdio_hwmod, - .user = OCP_USER_MPU, -}; - /* l4_wkup -> dcan1 */ static struct omap_hwmod_ocp_if dra7xx_l4_wkup__dcan1 = { .master = &dra7xx_l4_wkup_hwmod, @@ -2412,94 +2074,6 @@ static struct omap_hwmod_ocp_if dra7xx_l3_main_1__sha0 = { .user = OCP_USER_MPU | OCP_USER_SDMA, }; -/* l4_per2 -> mcasp1 */ -static struct omap_hwmod_ocp_if dra7xx_l4_per2__mcasp1 = { - .master = &dra7xx_l4_per2_hwmod, - .slave = &dra7xx_mcasp1_hwmod, - .clk = "l4_root_clk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_1 -> mcasp1 */ -static struct omap_hwmod_ocp_if dra7xx_l3_main_1__mcasp1 = { - .master = &dra7xx_l3_main_1_hwmod, - .slave = &dra7xx_mcasp1_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_per2 -> mcasp2 */ -static struct omap_hwmod_ocp_if dra7xx_l4_per2__mcasp2 = { - .master = &dra7xx_l4_per2_hwmod, - .slave = &dra7xx_mcasp2_hwmod, - .clk = "l4_root_clk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_1 -> mcasp2 */ -static struct omap_hwmod_ocp_if dra7xx_l3_main_1__mcasp2 = { - .master = &dra7xx_l3_main_1_hwmod, - .slave = &dra7xx_mcasp2_hwmod, - .clk = "l3_iclk_div", - .user = 
OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_per2 -> mcasp3 */ -static struct omap_hwmod_ocp_if dra7xx_l4_per2__mcasp3 = { - .master = &dra7xx_l4_per2_hwmod, - .slave = &dra7xx_mcasp3_hwmod, - .clk = "l4_root_clk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l3_main_1 -> mcasp3 */ -static struct omap_hwmod_ocp_if dra7xx_l3_main_1__mcasp3 = { - .master = &dra7xx_l3_main_1_hwmod, - .slave = &dra7xx_mcasp3_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_per2 -> mcasp4 */ -static struct omap_hwmod_ocp_if dra7xx_l4_per2__mcasp4 = { - .master = &dra7xx_l4_per2_hwmod, - .slave = &dra7xx_mcasp4_hwmod, - .clk = "l4_root_clk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_per2 -> mcasp5 */ -static struct omap_hwmod_ocp_if dra7xx_l4_per2__mcasp5 = { - .master = &dra7xx_l4_per2_hwmod, - .slave = &dra7xx_mcasp5_hwmod, - .clk = "l4_root_clk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_per2 -> mcasp6 */ -static struct omap_hwmod_ocp_if dra7xx_l4_per2__mcasp6 = { - .master = &dra7xx_l4_per2_hwmod, - .slave = &dra7xx_mcasp6_hwmod, - .clk = "l4_root_clk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_per2 -> mcasp7 */ -static struct omap_hwmod_ocp_if dra7xx_l4_per2__mcasp7 = { - .master = &dra7xx_l4_per2_hwmod, - .slave = &dra7xx_mcasp7_hwmod, - .clk = "l4_root_clk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_per2 -> mcasp8 */ -static struct omap_hwmod_ocp_if dra7xx_l4_per2__mcasp8 = { - .master = &dra7xx_l4_per2_hwmod, - .slave = &dra7xx_mcasp8_hwmod, - .clk = "l4_root_clk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - /* l4_per1 -> elm */ static struct omap_hwmod_ocp_if dra7xx_l4_per1__elm = { .master = &dra7xx_l4_per1_hwmod, @@ -2628,38 +2202,6 @@ static struct omap_hwmod_ocp_if dra7xx_l4_per3__mailbox13 = { .user = OCP_USER_MPU | OCP_USER_SDMA, }; -/* l4_per1 -> mcspi1 */ -static struct omap_hwmod_ocp_if dra7xx_l4_per1__mcspi1 = { - .master = &dra7xx_l4_per1_hwmod, - .slave = &dra7xx_mcspi1_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_per1 -> mcspi2 */ -static struct omap_hwmod_ocp_if dra7xx_l4_per1__mcspi2 = { - .master = &dra7xx_l4_per1_hwmod, - .slave = &dra7xx_mcspi2_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_per1 -> mcspi3 */ -static struct omap_hwmod_ocp_if dra7xx_l4_per1__mcspi3 = { - .master = &dra7xx_l4_per1_hwmod, - .slave = &dra7xx_mcspi3_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_per1 -> mcspi4 */ -static struct omap_hwmod_ocp_if dra7xx_l4_per1__mcspi4 = { - .master = &dra7xx_l4_per1_hwmod, - .slave = &dra7xx_mcspi4_hwmod, - .clk = "l3_iclk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - /* l4_cfg -> mpu */ static struct omap_hwmod_ocp_if dra7xx_l4_cfg__mpu = { .master = &dra7xx_l4_cfg_hwmod, @@ -3021,19 +2563,6 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = { &dra7xx_l4_wkup__ctrl_module_wkup, &dra7xx_l4_wkup__dcan1, &dra7xx_l4_per2__dcan2, - &dra7xx_l4_per2__cpgmac0, - &dra7xx_l4_per2__mcasp1, - &dra7xx_l3_main_1__mcasp1, - &dra7xx_l4_per2__mcasp2, - &dra7xx_l3_main_1__mcasp2, - &dra7xx_l4_per2__mcasp3, - &dra7xx_l3_main_1__mcasp3, - &dra7xx_l4_per2__mcasp4, - &dra7xx_l4_per2__mcasp5, - &dra7xx_l4_per2__mcasp6, - &dra7xx_l4_per2__mcasp7, - &dra7xx_l4_per2__mcasp8, - &dra7xx_gmac__mdio, &dra7xx_l4_cfg__dma_system, &dra7xx_l3_main_1__tpcc, &dra7xx_l3_main_1__tptc0, @@ -3060,10 +2589,6 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] 
__initdata = { &dra7xx_l4_per3__mailbox11, &dra7xx_l4_per3__mailbox12, &dra7xx_l4_per3__mailbox13, - &dra7xx_l4_per1__mcspi1, - &dra7xx_l4_per1__mcspi2, - &dra7xx_l4_per1__mcspi3, - &dra7xx_l4_per1__mcspi4, &dra7xx_l4_cfg__mpu, &dra7xx_l4_cfg__ocp2scp1, &dra7xx_l4_cfg__ocp2scp3, diff --git a/arch/arm/plat-samsung/watchdog-reset.c b/arch/arm/plat-samsung/watchdog-reset.c index ce42cc640a61a3..71d85ff323f73d 100644 --- a/arch/arm/plat-samsung/watchdog-reset.c +++ b/arch/arm/plat-samsung/watchdog-reset.c @@ -62,6 +62,7 @@ void samsung_wdt_reset(void) #ifdef CONFIG_OF static const struct of_device_id s3c2410_wdt_match[] = { { .compatible = "samsung,s3c2410-wdt" }, + { .compatible = "samsung,s3c6410-wdt" }, {}, }; diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 835a1509882bf0..37c610963eeeda 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -134,6 +134,7 @@ config ARM64 select HAVE_ARCH_TRANSPARENT_HUGEPAGE select HAVE_ARCH_VMAP_STACK select HAVE_ARM_SMCCC + select HAVE_ASM_MODVERSIONS select HAVE_EBPF_JIT select HAVE_C_RECORDMCOUNT select HAVE_CMPXCHG_DOUBLE @@ -1134,7 +1135,7 @@ config ARM64_TAGGED_ADDR_ABI When this option is enabled, user applications can opt in to a relaxed ABI via prctl() allowing tagged addresses to be passed to system calls as pointer arguments. For details, see - Documentation/arm64/tagged-address-abi.txt. + Documentation/arm64/tagged-address-abi.rst. menuconfig COMPAT bool "Kernel support for 32-bit EL0" diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index f843d298792dcb..84a3d502c5a535 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -108,7 +108,7 @@ endif CHECKFLAGS += -D__aarch64__ ifeq ($(CONFIG_ARM64_MODULE_PLTS),y) -KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/arm64/kernel/module.lds +KBUILD_LDS_MODULE += $(srctree)/arch/arm64/kernel/module.lds endif # Default value diff --git a/arch/arm64/boot/dts/marvell/Makefile b/arch/arm64/boot/dts/marvell/Makefile index caed4334f27dea..243338c914a4f5 100644 --- a/arch/arm64/boot/dts/marvell/Makefile +++ b/arch/arm64/boot/dts/marvell/Makefile @@ -2,6 +2,7 @@ # Mvebu SoC Family dtb-$(CONFIG_ARCH_MVEBU) += armada-3720-db.dtb dtb-$(CONFIG_ARCH_MVEBU) += armada-3720-espressobin.dtb +dtb-$(CONFIG_ARCH_MVEBU) += armada-3720-turris-mox.dtb dtb-$(CONFIG_ARCH_MVEBU) += armada-3720-uDPU.dtb dtb-$(CONFIG_ARCH_MVEBU) += armada-7040-db.dtb dtb-$(CONFIG_ARCH_MVEBU) += armada-8040-clearfog-gt-8k.dtb diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts new file mode 100644 index 00000000000000..d105986c6be1a6 --- /dev/null +++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts @@ -0,0 +1,840 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Device Tree file for CZ.NIC Turris Mox Board + * 2019 by Marek Behun + */ + +/dts-v1/; + +#include +#include +#include +#include "armada-372x.dtsi" + +/ { + model = "CZ.NIC Turris Mox Board"; + compatible = "cznic,turris-mox", "marvell,armada3720", + "marvell,armada3710"; + + aliases { + spi0 = &spi0; + ethernet1 = ð1; + }; + + chosen { + stdout-path = "serial0:115200n8"; + }; + + memory@0 { + device_type = "memory"; + reg = <0x00000000 0x00000000 0x00000000 0x20000000>; + }; + + leds { + compatible = "gpio-leds"; + red { + label = "mox:red:activity"; + gpios = <&gpiosb 21 GPIO_ACTIVE_LOW>; + linux,default-trigger = "default-on"; + }; + }; + + gpio-keys { + compatible = "gpio-keys"; + + reset { + label = "reset"; + linux,code = ; + gpios = <&gpiosb 20 GPIO_ACTIVE_LOW>; + 
debounce-interval = <60>; + }; + }; + + exp_usb3_vbus: usb3-vbus { + compatible = "regulator-fixed"; + regulator-name = "usb3-vbus"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + enable-active-high; + regulator-always-on; + gpio = <&gpiosb 0 GPIO_ACTIVE_HIGH>; + }; + + usb3_phy: usb3-phy { + compatible = "usb-nop-xceiv"; + vcc-supply = <&exp_usb3_vbus>; + }; + + vsdc_reg: vsdc-reg { + compatible = "regulator-gpio"; + regulator-name = "vsdc"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + + gpios = <&gpiosb 23 GPIO_ACTIVE_HIGH>; + gpios-states = <0>; + states = <1800000 0x1 + 3300000 0x0>; + enable-active-high; + }; + + vsdio_reg: vsdio-reg { + compatible = "regulator-gpio"; + regulator-name = "vsdio"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + + gpios = <&gpiosb 22 GPIO_ACTIVE_HIGH>; + gpios-states = <0>; + states = <1800000 0x1 + 3300000 0x0>; + enable-active-high; + }; + + sdhci1_pwrseq: sdhci1-pwrseq { + compatible = "mmc-pwrseq-simple"; + reset-gpios = <&gpionb 19 GPIO_ACTIVE_HIGH>; + status = "okay"; + }; + + sfp: sfp { + compatible = "sff,sfp+"; + i2c-bus = <&i2c0>; + los-gpio = <&moxtet_sfp 0 GPIO_ACTIVE_HIGH>; + tx-fault-gpio = <&moxtet_sfp 1 GPIO_ACTIVE_HIGH>; + mod-def0-gpio = <&moxtet_sfp 2 GPIO_ACTIVE_LOW>; + tx-disable-gpio = <&moxtet_sfp 4 GPIO_ACTIVE_HIGH>; + rate-select0-gpio = <&moxtet_sfp 5 GPIO_ACTIVE_HIGH>; + + /* enabled by U-Boot if SFP module is present */ + status = "disabled"; + }; +}; + +&i2c0 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c1_pins>; + clock-frequency = <100000>; + status = "okay"; + + rtc@6f { + compatible = "microchip,mcp7940x"; + reg = <0x6f>; + }; +}; + +&pcie_reset_pins { + function = "gpio"; +}; + +&pcie0 { + pinctrl-names = "default"; + pinctrl-0 = <&pcie_reset_pins &pcie_clkreq_pins>; + status = "okay"; + max-link-speed = <2>; + reset-gpios = <&gpiosb 3 GPIO_ACTIVE_LOW>; + phys = <&comphy1 0>; + + /* enabled by U-Boot if PCIe module is present */ + status = "disabled"; +}; + +&uart0 { + status = "okay"; +}; + +ð0 { + pinctrl-names = "default"; + pinctrl-0 = <&rgmii_pins>; + phy-mode = "rgmii-id"; + phy = <&phy1>; + status = "okay"; +}; + +ð1 { + phy-mode = "2500base-x"; + managed = "in-band-status"; + phys = <&comphy0 1>; +}; + +&sdhci0 { + wp-inverted; + bus-width = <4>; + cd-gpios = <&gpionb 10 GPIO_ACTIVE_HIGH>; + vqmmc-supply = <&vsdc_reg>; + marvell,pad-type = "sd"; + status = "okay"; +}; + +&sdhci1 { + pinctrl-names = "default"; + pinctrl-0 = <&sdio_pins>; + non-removable; + bus-width = <4>; + marvell,pad-type = "sd"; + vqmmc-supply = <&vsdio_reg>; + mmc-pwrseq = <&sdhci1_pwrseq>; + status = "okay"; +}; + +&spi0 { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&spi_quad_pins &spi_cs1_pins>; + assigned-clocks = <&nb_periph_clk 7>; + assigned-clock-parents = <&tbg 1>; + assigned-clock-rates = <20000000>; + + spi-flash@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <20000000>; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "secure-firmware"; + reg = <0x0 0x20000>; + }; + + partition@20000 { + label = "u-boot"; + reg = <0x20000 0x160000>; + }; + + partition@180000 { + label = "u-boot-env"; + reg = <0x180000 0x10000>; + }; + + partition@190000 { + label = "Rescue system"; + reg = <0x190000 0x660000>; + }; + + partition@7f0000 { + 
label = "dtb"; + reg = <0x7f0000 0x10000>; + }; + }; + }; + + moxtet: moxtet@1 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "cznic,moxtet"; + reg = <1>; + reset-gpios = <&gpiosb 2 GPIO_ACTIVE_LOW>; + spi-max-frequency = <10000000>; + spi-cpol; + spi-cpha; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&gpiosb>; + interrupts = <5 IRQ_TYPE_EDGE_FALLING>; + status = "okay"; + + moxtet_sfp: gpio@0 { + compatible = "cznic,moxtet-gpio"; + gpio-controller; + #gpio-cells = <2>; + reg = <0>; + status = "disabled"; + }; + }; +}; + +&usb2 { + status = "okay"; +}; + +&usb3 { + status = "okay"; + phys = <&comphy2 0>; + usb-phy = <&usb3_phy>; +}; + +&mdio { + pinctrl-names = "default"; + pinctrl-0 = <&smi_pins>; + status = "okay"; + + phy1: ethernet-phy@1 { + reg = <1>; + }; + + /* switch nodes are enabled by U-Boot if modules are present */ + switch0@10 { + compatible = "marvell,mv88e6190"; + reg = <0x10 0>; + dsa,member = <0 0>; + interrupt-parent = <&moxtet>; + interrupts = ; + status = "disabled"; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + switch0phy1: switch0phy1@1 { + reg = <0x1>; + }; + + switch0phy2: switch0phy2@2 { + reg = <0x2>; + }; + + switch0phy3: switch0phy3@3 { + reg = <0x3>; + }; + + switch0phy4: switch0phy4@4 { + reg = <0x4>; + }; + + switch0phy5: switch0phy5@5 { + reg = <0x5>; + }; + + switch0phy6: switch0phy6@6 { + reg = <0x6>; + }; + + switch0phy7: switch0phy7@7 { + reg = <0x7>; + }; + + switch0phy8: switch0phy8@8 { + reg = <0x8>; + }; + }; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@1 { + reg = <0x1>; + label = "lan1"; + phy-handle = <&switch0phy1>; + }; + + port@2 { + reg = <0x2>; + label = "lan2"; + phy-handle = <&switch0phy2>; + }; + + port@3 { + reg = <0x3>; + label = "lan3"; + phy-handle = <&switch0phy3>; + }; + + port@4 { + reg = <0x4>; + label = "lan4"; + phy-handle = <&switch0phy4>; + }; + + port@5 { + reg = <0x5>; + label = "lan5"; + phy-handle = <&switch0phy5>; + }; + + port@6 { + reg = <0x6>; + label = "lan6"; + phy-handle = <&switch0phy6>; + }; + + port@7 { + reg = <0x7>; + label = "lan7"; + phy-handle = <&switch0phy7>; + }; + + port@8 { + reg = <0x8>; + label = "lan8"; + phy-handle = <&switch0phy8>; + }; + + port@9 { + reg = <0x9>; + label = "cpu"; + ethernet = <ð1>; + phy-mode = "2500base-x"; + managed = "in-band-status"; + }; + + switch0port10: port@a { + reg = <0xa>; + label = "dsa"; + phy-mode = "2500base-x"; + managed = "in-band-status"; + link = <&switch1port9 &switch2port9>; + status = "disabled"; + }; + + port-sfp@a { + reg = <0xa>; + label = "sfp"; + sfp = <&sfp>; + phy-mode = "sgmii"; + managed = "in-band-status"; + status = "disabled"; + }; + }; + }; + + switch0@2 { + compatible = "marvell,mv88e6085"; + reg = <0x2 0>; + dsa,member = <0 0>; + interrupt-parent = <&moxtet>; + interrupts = ; + status = "disabled"; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + switch0phy1_topaz: switch0phy1@11 { + reg = <0x11>; + }; + + switch0phy2_topaz: switch0phy2@12 { + reg = <0x12>; + }; + + switch0phy3_topaz: switch0phy3@13 { + reg = <0x13>; + }; + + switch0phy4_topaz: switch0phy4@14 { + reg = <0x14>; + }; + }; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@1 { + reg = <0x1>; + label = "lan1"; + phy-handle = <&switch0phy1_topaz>; + }; + + port@2 { + reg = <0x2>; + label = "lan2"; + phy-handle = <&switch0phy2_topaz>; + }; + + port@3 { + reg = <0x3>; + label = "lan3"; + phy-handle = <&switch0phy3_topaz>; + }; + + port@4 { + reg = <0x4>; + label = "lan4"; + 
phy-handle = <&switch0phy4_topaz>; + }; + + port@5 { + reg = <0x5>; + label = "cpu"; + phy-mode = "2500base-x"; + managed = "in-band-status"; + ethernet = <ð1>; + }; + }; + }; + + switch1@11 { + compatible = "marvell,mv88e6190"; + reg = <0x11 0>; + dsa,member = <0 1>; + interrupt-parent = <&moxtet>; + interrupts = ; + status = "disabled"; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + switch1phy1: switch1phy1@1 { + reg = <0x1>; + }; + + switch1phy2: switch1phy2@2 { + reg = <0x2>; + }; + + switch1phy3: switch1phy3@3 { + reg = <0x3>; + }; + + switch1phy4: switch1phy4@4 { + reg = <0x4>; + }; + + switch1phy5: switch1phy5@5 { + reg = <0x5>; + }; + + switch1phy6: switch1phy6@6 { + reg = <0x6>; + }; + + switch1phy7: switch1phy7@7 { + reg = <0x7>; + }; + + switch1phy8: switch1phy8@8 { + reg = <0x8>; + }; + }; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@1 { + reg = <0x1>; + label = "lan9"; + phy-handle = <&switch1phy1>; + }; + + port@2 { + reg = <0x2>; + label = "lan10"; + phy-handle = <&switch1phy2>; + }; + + port@3 { + reg = <0x3>; + label = "lan11"; + phy-handle = <&switch1phy3>; + }; + + port@4 { + reg = <0x4>; + label = "lan12"; + phy-handle = <&switch1phy4>; + }; + + port@5 { + reg = <0x5>; + label = "lan13"; + phy-handle = <&switch1phy5>; + }; + + port@6 { + reg = <0x6>; + label = "lan14"; + phy-handle = <&switch1phy6>; + }; + + port@7 { + reg = <0x7>; + label = "lan15"; + phy-handle = <&switch1phy7>; + }; + + port@8 { + reg = <0x8>; + label = "lan16"; + phy-handle = <&switch1phy8>; + }; + + switch1port9: port@9 { + reg = <0x9>; + label = "dsa"; + phy-mode = "2500base-x"; + managed = "in-band-status"; + link = <&switch0port10>; + }; + + switch1port10: port@a { + reg = <0xa>; + label = "dsa"; + phy-mode = "2500base-x"; + managed = "in-band-status"; + link = <&switch2port9>; + status = "disabled"; + }; + + port-sfp@a { + reg = <0xa>; + label = "sfp"; + sfp = <&sfp>; + phy-mode = "sgmii"; + managed = "in-band-status"; + status = "disabled"; + }; + }; + }; + + switch1@2 { + compatible = "marvell,mv88e6085"; + reg = <0x2 0>; + dsa,member = <0 1>; + interrupt-parent = <&moxtet>; + interrupts = ; + status = "disabled"; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + switch1phy1_topaz: switch1phy1@11 { + reg = <0x11>; + }; + + switch1phy2_topaz: switch1phy2@12 { + reg = <0x12>; + }; + + switch1phy3_topaz: switch1phy3@13 { + reg = <0x13>; + }; + + switch1phy4_topaz: switch1phy4@14 { + reg = <0x14>; + }; + }; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@1 { + reg = <0x1>; + label = "lan9"; + phy-handle = <&switch1phy1_topaz>; + }; + + port@2 { + reg = <0x2>; + label = "lan10"; + phy-handle = <&switch1phy2_topaz>; + }; + + port@3 { + reg = <0x3>; + label = "lan11"; + phy-handle = <&switch1phy3_topaz>; + }; + + port@4 { + reg = <0x4>; + label = "lan12"; + phy-handle = <&switch1phy4_topaz>; + }; + + port@5 { + reg = <0x5>; + label = "dsa"; + phy-mode = "2500base-x"; + managed = "in-band-status"; + link = <&switch0port10>; + }; + }; + }; + + switch2@12 { + compatible = "marvell,mv88e6190"; + reg = <0x12 0>; + dsa,member = <0 2>; + interrupt-parent = <&moxtet>; + interrupts = ; + status = "disabled"; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + switch2phy1: switch2phy1@1 { + reg = <0x1>; + }; + + switch2phy2: switch2phy2@2 { + reg = <0x2>; + }; + + switch2phy3: switch2phy3@3 { + reg = <0x3>; + }; + + switch2phy4: switch2phy4@4 { + reg = <0x4>; + }; + + switch2phy5: switch2phy5@5 { + reg = <0x5>; + }; + + switch2phy6: 
switch2phy6@6 { + reg = <0x6>; + }; + + switch2phy7: switch2phy7@7 { + reg = <0x7>; + }; + + switch2phy8: switch2phy8@8 { + reg = <0x8>; + }; + }; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@1 { + reg = <0x1>; + label = "lan17"; + phy-handle = <&switch2phy1>; + }; + + port@2 { + reg = <0x2>; + label = "lan18"; + phy-handle = <&switch2phy2>; + }; + + port@3 { + reg = <0x3>; + label = "lan19"; + phy-handle = <&switch2phy3>; + }; + + port@4 { + reg = <0x4>; + label = "lan20"; + phy-handle = <&switch2phy4>; + }; + + port@5 { + reg = <0x5>; + label = "lan21"; + phy-handle = <&switch2phy5>; + }; + + port@6 { + reg = <0x6>; + label = "lan22"; + phy-handle = <&switch2phy6>; + }; + + port@7 { + reg = <0x7>; + label = "lan23"; + phy-handle = <&switch2phy7>; + }; + + port@8 { + reg = <0x8>; + label = "lan24"; + phy-handle = <&switch2phy8>; + }; + + switch2port9: port@9 { + reg = <0x9>; + label = "dsa"; + phy-mode = "2500base-x"; + managed = "in-band-status"; + link = <&switch1port10 &switch0port10>; + }; + + port-sfp@a { + reg = <0xa>; + label = "sfp"; + sfp = <&sfp>; + phy-mode = "sgmii"; + managed = "in-band-status"; + status = "disabled"; + }; + }; + }; + + switch2@2 { + compatible = "marvell,mv88e6085"; + reg = <0x2 0>; + dsa,member = <0 2>; + interrupt-parent = <&moxtet>; + interrupts = ; + status = "disabled"; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + switch2phy1_topaz: switch2phy1@11 { + reg = <0x11>; + }; + + switch2phy2_topaz: switch2phy2@12 { + reg = <0x12>; + }; + + switch2phy3_topaz: switch2phy3@13 { + reg = <0x13>; + }; + + switch2phy4_topaz: switch2phy4@14 { + reg = <0x14>; + }; + }; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@1 { + reg = <0x1>; + label = "lan17"; + phy-handle = <&switch2phy1_topaz>; + }; + + port@2 { + reg = <0x2>; + label = "lan18"; + phy-handle = <&switch2phy2_topaz>; + }; + + port@3 { + reg = <0x3>; + label = "lan19"; + phy-handle = <&switch2phy3_topaz>; + }; + + port@4 { + reg = <0x4>; + label = "lan20"; + phy-handle = <&switch2phy4_topaz>; + }; + + port@5 { + reg = <0x5>; + label = "dsa"; + phy-mode = "2500base-x"; + managed = "in-band-status"; + link = <&switch1port10 &switch0port10>; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi index 7f69e3dfcb13ae..000c135e39b73c 100644 --- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi @@ -215,6 +215,11 @@ function = "spi"; }; + spi_cs1_pins: spi-cs1-pins { + groups = "spi_cs1"; + function = "spi"; + }; + i2c1_pins: i2c1-pins { groups = "i2c1"; function = "i2c"; diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi index ca70ff73f171d9..799c75fa7981e2 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi @@ -42,7 +42,7 @@ */ interrupts = ; - gic_its: gic-its@18200000 { + gic_its: gic-its@1820000 { compatible = "arm,gic-v3-its"; reg = <0x00 0x01820000 0x00 0x10000>; socionext,synquacer-pre-its = <0x1000000 0x400000>; @@ -67,7 +67,7 @@ reg = <0x0 0x900000 0x0 0x2000>; reg-names = "serdes"; #phy-cells = <2>; - power-domains = <&k3_pds 153>; + power-domains = <&k3_pds 153 TI_SCI_PD_EXCLUSIVE>; clocks = <&k3_clks 153 4>, <&k3_clks 153 1>, <&serdes1 AM654_SERDES_LO_REFCLK>; clock-output-names = "serdes0_cmu_refclk", "serdes0_lo_refclk", "serdes0_ro_refclk"; assigned-clocks = <&k3_clks 153 4>, <&serdes0 AM654_SERDES_CMU_REFCLK>; @@ -82,7 +82,7 @@ reg = <0x0 
0x910000 0x0 0x2000>; reg-names = "serdes"; #phy-cells = <2>; - power-domains = <&k3_pds 154>; + power-domains = <&k3_pds 154 TI_SCI_PD_EXCLUSIVE>; clocks = <&serdes0 AM654_SERDES_RO_REFCLK>, <&k3_clks 154 1>, <&k3_clks 154 5>; clock-output-names = "serdes1_cmu_refclk", "serdes1_lo_refclk", "serdes1_ro_refclk"; assigned-clocks = <&k3_clks 154 5>, <&serdes1 AM654_SERDES_CMU_REFCLK>; @@ -100,7 +100,7 @@ interrupts = ; clock-frequency = <48000000>; current-speed = <115200>; - power-domains = <&k3_pds 146>; + power-domains = <&k3_pds 146 TI_SCI_PD_EXCLUSIVE>; }; main_uart1: serial@2810000 { @@ -110,7 +110,7 @@ reg-io-width = <4>; interrupts = ; clock-frequency = <48000000>; - power-domains = <&k3_pds 147>; + power-domains = <&k3_pds 147 TI_SCI_PD_EXCLUSIVE>; }; main_uart2: serial@2820000 { @@ -120,7 +120,7 @@ reg-io-width = <4>; interrupts = ; clock-frequency = <48000000>; - power-domains = <&k3_pds 148>; + power-domains = <&k3_pds 148 TI_SCI_PD_EXCLUSIVE>; }; main_pmx0: pinmux@11c000 { @@ -147,7 +147,7 @@ #size-cells = <0>; clock-names = "fck"; clocks = <&k3_clks 110 1>; - power-domains = <&k3_pds 110>; + power-domains = <&k3_pds 110 TI_SCI_PD_EXCLUSIVE>; }; main_i2c1: i2c@2010000 { @@ -158,7 +158,7 @@ #size-cells = <0>; clock-names = "fck"; clocks = <&k3_clks 111 1>; - power-domains = <&k3_pds 111>; + power-domains = <&k3_pds 111 TI_SCI_PD_EXCLUSIVE>; }; main_i2c2: i2c@2020000 { @@ -169,7 +169,7 @@ #size-cells = <0>; clock-names = "fck"; clocks = <&k3_clks 112 1>; - power-domains = <&k3_pds 112>; + power-domains = <&k3_pds 112 TI_SCI_PD_EXCLUSIVE>; }; main_i2c3: i2c@2030000 { @@ -180,14 +180,14 @@ #size-cells = <0>; clock-names = "fck"; clocks = <&k3_clks 113 1>; - power-domains = <&k3_pds 113>; + power-domains = <&k3_pds 113 TI_SCI_PD_EXCLUSIVE>; }; ecap0: pwm@3100000 { compatible = "ti,am654-ecap", "ti,am3352-ecap"; #pwm-cells = <3>; reg = <0x0 0x03100000 0x0 0x60>; - power-domains = <&k3_pds 39>; + power-domains = <&k3_pds 39 TI_SCI_PD_EXCLUSIVE>; clocks = <&k3_clks 39 0>; clock-names = "fck"; }; @@ -197,7 +197,7 @@ reg = <0x0 0x2100000 0x0 0x400>; interrupts = ; clocks = <&k3_clks 137 1>; - power-domains = <&k3_pds 137>; + power-domains = <&k3_pds 137 TI_SCI_PD_EXCLUSIVE>; #address-cells = <1>; #size-cells = <0>; }; @@ -207,7 +207,7 @@ reg = <0x0 0x2110000 0x0 0x400>; interrupts = ; clocks = <&k3_clks 138 1>; - power-domains = <&k3_pds 138>; + power-domains = <&k3_pds 138 TI_SCI_PD_EXCLUSIVE>; #address-cells = <1>; #size-cells = <0>; assigned-clocks = <&k3_clks 137 1>; @@ -219,7 +219,7 @@ reg = <0x0 0x2120000 0x0 0x400>; interrupts = ; clocks = <&k3_clks 139 1>; - power-domains = <&k3_pds 139>; + power-domains = <&k3_pds 139 TI_SCI_PD_EXCLUSIVE>; #address-cells = <1>; #size-cells = <0>; }; @@ -229,7 +229,7 @@ reg = <0x0 0x2130000 0x0 0x400>; interrupts = ; clocks = <&k3_clks 140 1>; - power-domains = <&k3_pds 140>; + power-domains = <&k3_pds 140 TI_SCI_PD_EXCLUSIVE>; #address-cells = <1>; #size-cells = <0>; }; @@ -239,7 +239,7 @@ reg = <0x0 0x2140000 0x0 0x400>; interrupts = ; clocks = <&k3_clks 141 1>; - power-domains = <&k3_pds 141>; + power-domains = <&k3_pds 141 TI_SCI_PD_EXCLUSIVE>; #address-cells = <1>; #size-cells = <0>; }; @@ -247,7 +247,7 @@ sdhci0: sdhci@4f80000 { compatible = "ti,am654-sdhci-5.1"; reg = <0x0 0x4f80000 0x0 0x260>, <0x0 0x4f90000 0x0 0x134>; - power-domains = <&k3_pds 47>; + power-domains = <&k3_pds 47 TI_SCI_PD_EXCLUSIVE>; clocks = <&k3_clks 47 0>, <&k3_clks 47 1>; clock-names = "clk_ahb", "clk_xin"; interrupts = ; @@ -306,7 +306,7 @@ ranges = <0x0 0x0 
0x4000000 0x20000>; interrupts = ; dma-coherent; - power-domains = <&k3_pds 151>; + power-domains = <&k3_pds 151 TI_SCI_PD_EXCLUSIVE>; assigned-clocks = <&k3_clks 151 2>, <&k3_clks 151 7>; assigned-clock-parents = <&k3_clks 151 4>, /* set REF_CLK to 20MHz i.e. PER0_PLL/48 */ <&k3_clks 151 9>; /* set PIPE3_TXB_CLK to CLK_12M_RC/256 (for HS only) */ @@ -345,7 +345,7 @@ ranges = <0x0 0x0 0x4020000 0x20000>; interrupts = ; dma-coherent; - power-domains = <&k3_pds 152>; + power-domains = <&k3_pds 152 TI_SCI_PD_EXCLUSIVE>; assigned-clocks = <&k3_clks 152 2>; assigned-clock-parents = <&k3_clks 152 4>; /* set REF_CLK to 20MHz i.e. PER0_PLL/48 */ @@ -413,6 +413,12 @@ ti,sci-rm-range-vint = <0x0>; ti,sci-rm-range-global-event = <0x1>; }; + + hwspinlock: spinlock@30e00000 { + compatible = "ti,am654-hwspinlock"; + reg = <0x00 0x30e00000 0x00 0x1000>; + #hwlock-cells = <1>; + }; }; main_gpio0: main_gpio0@600000 { @@ -451,7 +457,7 @@ compatible = "ti,am654-pcie-rc"; reg = <0x0 0x5500000 0x0 0x1000>, <0x0 0x5501000 0x0 0x1000>, <0x0 0x10000000 0x0 0x2000>, <0x0 0x5506000 0x0 0x1000>; reg-names = "app", "dbics", "config", "atu"; - power-domains = <&k3_pds 120>; + power-domains = <&k3_pds 120 TI_SCI_PD_EXCLUSIVE>; #address-cells = <3>; #size-cells = <2>; ranges = <0x81000000 0 0 0x0 0x10020000 0 0x00010000 @@ -470,7 +476,7 @@ compatible = "ti,am654-pcie-ep"; reg = <0x0 0x5500000 0x0 0x1000>, <0x0 0x5501000 0x0 0x1000>, <0x0 0x10000000 0x0 0x8000000>, <0x0 0x5506000 0x0 0x1000>; reg-names = "app", "dbics", "addr_space", "atu"; - power-domains = <&k3_pds 120>; + power-domains = <&k3_pds 120 TI_SCI_PD_EXCLUSIVE>; ti,syscon-pcie-mode = <&pcie0_mode>; num-ib-windows = <16>; num-ob-windows = <16>; @@ -483,7 +489,7 @@ compatible = "ti,am654-pcie-rc"; reg = <0x0 0x5600000 0x0 0x1000>, <0x0 0x5601000 0x0 0x1000>, <0x0 0x18000000 0x0 0x2000>, <0x0 0x5606000 0x0 0x1000>; reg-names = "app", "dbics", "config", "atu"; - power-domains = <&k3_pds 121>; + power-domains = <&k3_pds 121 TI_SCI_PD_EXCLUSIVE>; #address-cells = <3>; #size-cells = <2>; ranges = <0x81000000 0 0 0x0 0x18020000 0 0x00010000 @@ -502,7 +508,7 @@ compatible = "ti,am654-pcie-ep"; reg = <0x0 0x5600000 0x0 0x1000>, <0x0 0x5601000 0x0 0x1000>, <0x0 0x18000000 0x0 0x4000000>, <0x0 0x5606000 0x0 0x1000>; reg-names = "app", "dbics", "addr_space", "atu"; - power-domains = <&k3_pds 121>; + power-domains = <&k3_pds 121 TI_SCI_PD_EXCLUSIVE>; ti,syscon-pcie-mode = <&pcie1_mode>; num-ib-windows = <16>; num-ob-windows = <16>; diff --git a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi index afc29eaa263839..7bdf5342f58f33 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi @@ -14,7 +14,7 @@ interrupts = ; clock-frequency = <96000000>; current-speed = <115200>; - power-domains = <&k3_pds 149>; + power-domains = <&k3_pds 149 TI_SCI_PD_EXCLUSIVE>; }; mcu_ram: sram@41c00000 { @@ -33,7 +33,7 @@ #size-cells = <0>; clock-names = "fck"; clocks = <&k3_clks 114 1>; - power-domains = <&k3_pds 114>; + power-domains = <&k3_pds 114 TI_SCI_PD_EXCLUSIVE>; }; mcu_spi0: spi@40300000 { @@ -41,7 +41,7 @@ reg = <0x0 0x40300000 0x0 0x400>; interrupts = ; clocks = <&k3_clks 142 1>; - power-domains = <&k3_pds 142>; + power-domains = <&k3_pds 142 TI_SCI_PD_EXCLUSIVE>; #address-cells = <1>; #size-cells = <0>; }; @@ -51,7 +51,7 @@ reg = <0x0 0x40310000 0x0 0x400>; interrupts = ; clocks = <&k3_clks 143 1>; - power-domains = <&k3_pds 143>; + power-domains = <&k3_pds 143 TI_SCI_PD_EXCLUSIVE>; #address-cells = <1>; 
#size-cells = <0>; }; @@ -61,7 +61,7 @@ reg = <0x0 0x40320000 0x0 0x400>; interrupts = ; clocks = <&k3_clks 144 1>; - power-domains = <&k3_pds 144>; + power-domains = <&k3_pds 144 TI_SCI_PD_EXCLUSIVE>; #address-cells = <1>; #size-cells = <0>; }; diff --git a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi index 9cf2c0849a24c6..f4227e2743f26a 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi @@ -20,7 +20,7 @@ k3_pds: power-controller { compatible = "ti,sci-pm-domain"; - #power-domain-cells = <1>; + #power-domain-cells = <2>; }; k3_clks: clocks { @@ -50,7 +50,7 @@ interrupts = ; clock-frequency = <48000000>; current-speed = <115200>; - power-domains = <&k3_pds 150>; + power-domains = <&k3_pds 150 TI_SCI_PD_EXCLUSIVE>; }; wkup_i2c0: i2c@42120000 { @@ -61,7 +61,7 @@ #size-cells = <0>; clock-names = "fck"; clocks = <&k3_clks 115 1>; - power-domains = <&k3_pds 115>; + power-domains = <&k3_pds 115 TI_SCI_PD_EXCLUSIVE>; }; intr_wkup_gpio: interrupt-controller2 { diff --git a/arch/arm64/boot/dts/ti/k3-am65.dtsi b/arch/arm64/boot/dts/ti/k3-am65.dtsi index 82edf10b237810..6dfccd5d56c848 100644 --- a/arch/arm64/boot/dts/ti/k3-am65.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65.dtsi @@ -9,6 +9,7 @@ #include #include #include +#include / { model = "Texas Instruments K3 AM654 SoC"; diff --git a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts index 52c245d36db921..1102b84f853d71 100644 --- a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts +++ b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts @@ -151,6 +151,7 @@ &main_uart0 { pinctrl-names = "default"; pinctrl-0 = <&main_uart0_pins_default>; + power-domains = <&k3_pds 146 TI_SCI_PD_SHARED>; }; &wkup_i2c0 { diff --git a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts index c680123f067c91..d2894d55fbbeda 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts +++ b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts @@ -6,12 +6,49 @@ /dts-v1/; #include "k3-j721e-som-p0.dtsi" +#include +#include / { chosen { stdout-path = "serial2:115200n8"; bootargs = "console=ttyS2,115200n8 earlycon=ns16550a,mmio32,0x02800000"; }; + + gpio_keys: gpio-keys { + compatible = "gpio-keys"; + autorepeat; + pinctrl-names = "default"; + pinctrl-0 = <&sw10_button_pins_default &sw11_button_pins_default>; + + sw10: sw10 { + label = "GPIO Key USER1"; + linux,code = ; + gpios = <&main_gpio0 0 GPIO_ACTIVE_LOW>; + }; + + sw11: sw11 { + label = "GPIO Key USER2"; + linux,code = ; + gpios = <&wkup_gpio0 7 GPIO_ACTIVE_LOW>; + }; + }; +}; + +&main_pmx0 { + sw10_button_pins_default: sw10_button_pins_default { + pinctrl-single,pins = < + J721E_IOPAD(0x0, PIN_INPUT, 7) /* (AC18) EXTINTn.GPIO0_0 */ + >; + }; +}; + +&wkup_pmx0 { + sw11_button_pins_default: sw11_button_pins_default { + pinctrl-single,pins = < + J721E_WKUP_IOPAD(0xcc, PIN_INPUT, 7) /* (G28) WKUP_GPIO0_7 */ + >; + }; }; &wkup_uart0 { @@ -19,6 +56,10 @@ status = "disabled"; }; +&main_uart0 { + power-domains = <&k3_pds 146 TI_SCI_PD_SHARED>; +}; + &main_uart3 { /* UART not brought out */ status = "disabled"; @@ -48,3 +89,31 @@ /* UART not brought out */ status = "disabled"; }; + +&main_gpio2 { + status = "disabled"; +}; + +&main_gpio3 { + status = "disabled"; +}; + +&main_gpio4 { + status = "disabled"; +}; + +&main_gpio5 { + status = "disabled"; +}; + +&main_gpio6 { + status = "disabled"; +}; + +&main_gpio7 { + status = 
"disabled"; +}; + +&wkup_gpio1 { + status = "disabled"; +}; diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi index a01308142f77d1..698ef9a1d5b75c 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi @@ -31,7 +31,7 @@ /* vcpumntirq: virtual CPU interface maintenance interrupt */ interrupts = ; - gic_its: gic-its@18200000 { + gic_its: gic-its@1820000 { compatible = "arm,gic-v3-its"; reg = <0x00 0x01820000 0x00 0x10000>; socionext,synquacer-pre-its = <0x1000000 0x400000>; @@ -89,6 +89,12 @@ ti,sci-rm-range-vint = <0xa>; ti,sci-rm-range-global-event = <0xd>; }; + + hwspinlock: spinlock@30e00000 { + compatible = "ti,am654-hwspinlock"; + reg = <0x00 0x30e00000 0x00 0x1000>; + #hwlock-cells = <1>; + }; }; secure_proxy_main: mailbox@32c00000 { @@ -119,7 +125,7 @@ interrupts = ; clock-frequency = <48000000>; current-speed = <115200>; - power-domains = <&k3_pds 146>; + power-domains = <&k3_pds 146 TI_SCI_PD_EXCLUSIVE>; clocks = <&k3_clks 146 0>; clock-names = "fclk"; }; @@ -132,7 +138,7 @@ interrupts = ; clock-frequency = <48000000>; current-speed = <115200>; - power-domains = <&k3_pds 278>; + power-domains = <&k3_pds 278 TI_SCI_PD_EXCLUSIVE>; clocks = <&k3_clks 278 0>; clock-names = "fclk"; }; @@ -145,7 +151,7 @@ interrupts = ; clock-frequency = <48000000>; current-speed = <115200>; - power-domains = <&k3_pds 279>; + power-domains = <&k3_pds 279 TI_SCI_PD_EXCLUSIVE>; clocks = <&k3_clks 279 0>; clock-names = "fclk"; }; @@ -158,7 +164,7 @@ interrupts = ; clock-frequency = <48000000>; current-speed = <115200>; - power-domains = <&k3_pds 280>; + power-domains = <&k3_pds 280 TI_SCI_PD_EXCLUSIVE>; clocks = <&k3_clks 280 0>; clock-names = "fclk"; }; @@ -171,7 +177,7 @@ interrupts = ; clock-frequency = <48000000>; current-speed = <115200>; - power-domains = <&k3_pds 281>; + power-domains = <&k3_pds 281 TI_SCI_PD_EXCLUSIVE>; clocks = <&k3_clks 281 0>; clock-names = "fclk"; }; @@ -184,7 +190,7 @@ interrupts = ; clock-frequency = <48000000>; current-speed = <115200>; - power-domains = <&k3_pds 282>; + power-domains = <&k3_pds 282 TI_SCI_PD_EXCLUSIVE>; clocks = <&k3_clks 282 0>; clock-names = "fclk"; }; @@ -197,7 +203,7 @@ interrupts = ; clock-frequency = <48000000>; current-speed = <115200>; - power-domains = <&k3_pds 283>; + power-domains = <&k3_pds 283 TI_SCI_PD_EXCLUSIVE>; clocks = <&k3_clks 283 0>; clock-names = "fclk"; }; @@ -210,7 +216,7 @@ interrupts = ; clock-frequency = <48000000>; current-speed = <115200>; - power-domains = <&k3_pds 284>; + power-domains = <&k3_pds 284 TI_SCI_PD_EXCLUSIVE>; clocks = <&k3_clks 284 0>; clock-names = "fclk"; }; @@ -223,7 +229,7 @@ interrupts = ; clock-frequency = <48000000>; current-speed = <115200>; - power-domains = <&k3_pds 285>; + power-domains = <&k3_pds 285 TI_SCI_PD_EXCLUSIVE>; clocks = <&k3_clks 285 0>; clock-names = "fclk"; }; @@ -236,8 +242,140 @@ interrupts = ; clock-frequency = <48000000>; current-speed = <115200>; - power-domains = <&k3_pds 286>; + power-domains = <&k3_pds 286 TI_SCI_PD_EXCLUSIVE>; clocks = <&k3_clks 286 0>; clock-names = "fclk"; }; + + main_gpio0: gpio@600000 { + compatible = "ti,j721e-gpio", "ti,keystone-gpio"; + reg = <0x0 0x00600000 0x0 0x100>; + gpio-controller; + #gpio-cells = <2>; + interrupt-parent = <&main_gpio_intr>; + interrupts = <105 0>, <105 1>, <105 2>, <105 3>, + <105 4>, <105 5>, <105 6>, <105 7>; + interrupt-controller; + #interrupt-cells = <2>; + ti,ngpio = <128>; + ti,davinci-gpio-unbanked = <0>; + power-domains = 
<&k3_pds 105 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 105 0>; + clock-names = "gpio"; + }; + + main_gpio1: gpio@601000 { + compatible = "ti,j721e-gpio", "ti,keystone-gpio"; + reg = <0x0 0x00601000 0x0 0x100>; + gpio-controller; + #gpio-cells = <2>; + interrupt-parent = <&main_gpio_intr>; + interrupts = <106 0>, <106 1>, <106 2>; + interrupt-controller; + #interrupt-cells = <2>; + ti,ngpio = <36>; + ti,davinci-gpio-unbanked = <0>; + power-domains = <&k3_pds 106 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 106 0>; + clock-names = "gpio"; + }; + + main_gpio2: gpio@610000 { + compatible = "ti,j721e-gpio", "ti,keystone-gpio"; + reg = <0x0 0x00610000 0x0 0x100>; + gpio-controller; + #gpio-cells = <2>; + interrupt-parent = <&main_gpio_intr>; + interrupts = <107 0>, <107 1>, <107 2>, <107 3>, + <107 4>, <107 5>, <107 6>, <107 7>; + interrupt-controller; + #interrupt-cells = <2>; + ti,ngpio = <128>; + ti,davinci-gpio-unbanked = <0>; + power-domains = <&k3_pds 107 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 107 0>; + clock-names = "gpio"; + }; + + main_gpio3: gpio@611000 { + compatible = "ti,j721e-gpio", "ti,keystone-gpio"; + reg = <0x0 0x00611000 0x0 0x100>; + gpio-controller; + #gpio-cells = <2>; + interrupt-parent = <&main_gpio_intr>; + interrupts = <108 0>, <108 1>, <108 2>; + interrupt-controller; + #interrupt-cells = <2>; + ti,ngpio = <36>; + ti,davinci-gpio-unbanked = <0>; + power-domains = <&k3_pds 108 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 108 0>; + clock-names = "gpio"; + }; + + main_gpio4: gpio@620000 { + compatible = "ti,j721e-gpio", "ti,keystone-gpio"; + reg = <0x0 0x00620000 0x0 0x100>; + gpio-controller; + #gpio-cells = <2>; + interrupt-parent = <&main_gpio_intr>; + interrupts = <109 0>, <109 1>, <109 2>, <109 3>, + <109 4>, <109 5>, <109 6>, <109 7>; + interrupt-controller; + #interrupt-cells = <2>; + ti,ngpio = <128>; + ti,davinci-gpio-unbanked = <0>; + power-domains = <&k3_pds 109 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 109 0>; + clock-names = "gpio"; + }; + + main_gpio5: gpio@621000 { + compatible = "ti,j721e-gpio", "ti,keystone-gpio"; + reg = <0x0 0x00621000 0x0 0x100>; + gpio-controller; + #gpio-cells = <2>; + interrupt-parent = <&main_gpio_intr>; + interrupts = <110 0>, <110 1>, <110 2>; + interrupt-controller; + #interrupt-cells = <2>; + ti,ngpio = <36>; + ti,davinci-gpio-unbanked = <0>; + power-domains = <&k3_pds 110 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 110 0>; + clock-names = "gpio"; + }; + + main_gpio6: gpio@630000 { + compatible = "ti,j721e-gpio", "ti,keystone-gpio"; + reg = <0x0 0x00630000 0x0 0x100>; + gpio-controller; + #gpio-cells = <2>; + interrupt-parent = <&main_gpio_intr>; + interrupts = <111 0>, <111 1>, <111 2>, <111 3>, + <111 4>, <111 5>, <111 6>, <111 7>; + interrupt-controller; + #interrupt-cells = <2>; + ti,ngpio = <128>; + ti,davinci-gpio-unbanked = <0>; + power-domains = <&k3_pds 111 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 111 0>; + clock-names = "gpio"; + }; + + main_gpio7: gpio@631000 { + compatible = "ti,j721e-gpio", "ti,keystone-gpio"; + reg = <0x0 0x00631000 0x0 0x100>; + gpio-controller; + #gpio-cells = <2>; + interrupt-parent = <&main_gpio_intr>; + interrupts = <112 0>, <112 1>, <112 2>; + interrupt-controller; + #interrupt-cells = <2>; + ti,ngpio = <36>; + ti,davinci-gpio-unbanked = <0>; + power-domains = <&k3_pds 112 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 112 0>; + clock-names = "gpio"; + }; }; diff --git a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi index 
07b58eeebceb0b..555dc7b7aedc96 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi @@ -20,7 +20,7 @@ k3_pds: power-controller { compatible = "ti,sci-pm-domain"; - #power-domain-cells = <1>; + #power-domain-cells = <2>; }; k3_clks: clocks { @@ -59,7 +59,7 @@ interrupts = ; clock-frequency = <48000000>; current-speed = <115200>; - power-domains = <&k3_pds 287>; + power-domains = <&k3_pds 287 TI_SCI_PD_EXCLUSIVE>; clocks = <&k3_clks 287 0>; clock-names = "fclk"; }; @@ -72,7 +72,7 @@ interrupts = ; clock-frequency = <96000000>; current-speed = <115200>; - power-domains = <&k3_pds 149>; + power-domains = <&k3_pds 149 TI_SCI_PD_EXCLUSIVE>; clocks = <&k3_clks 149 0>; clock-names = "fclk"; }; @@ -87,4 +87,38 @@ ti,sci-dst-id = <14>; ti,sci-rm-range-girq = <0x5>; }; + + wkup_gpio0: gpio@42110000 { + compatible = "ti,j721e-gpio", "ti,keystone-gpio"; + reg = <0x0 0x42110000 0x0 0x100>; + gpio-controller; + #gpio-cells = <2>; + interrupt-parent = <&wkup_gpio_intr>; + interrupts = <113 0>, <113 1>, <113 2>, + <113 3>, <113 4>, <113 5>; + interrupt-controller; + #interrupt-cells = <2>; + ti,ngpio = <84>; + ti,davinci-gpio-unbanked = <0>; + power-domains = <&k3_pds 113 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 113 0>; + clock-names = "gpio"; + }; + + wkup_gpio1: gpio@42100000 { + compatible = "ti,j721e-gpio", "ti,keystone-gpio"; + reg = <0x0 0x42100000 0x0 0x100>; + gpio-controller; + #gpio-cells = <2>; + interrupt-parent = <&wkup_gpio_intr>; + interrupts = <114 0>, <114 1>, <114 2>, + <114 3>, <114 4>, <114 5>; + interrupt-controller; + #interrupt-cells = <2>; + ti,ngpio = <84>; + ti,davinci-gpio-unbanked = <0>; + power-domains = <&k3_pds 114 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 114 0>; + clock-names = "gpio"; + }; }; diff --git a/arch/arm64/boot/dts/ti/k3-j721e.dtsi b/arch/arm64/boot/dts/ti/k3-j721e.dtsi index f8dd74b17bfb9a..43ea1ba9792203 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721e.dtsi @@ -8,6 +8,7 @@ #include #include #include +#include / { model = "Texas Instruments K3 J721E SoC"; diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index a1398f2f9994fc..f9bef42c1411af 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -62,7 +62,7 @@ __XCHG_CASE( , , mb_, 64, dmb ish, nop, , a, l, "memory") #undef __XCHG_CASE #define __XCHG_GEN(sfx) \ -static inline unsigned long __xchg##sfx(unsigned long x, \ +static __always_inline unsigned long __xchg##sfx(unsigned long x, \ volatile void *ptr, \ int size) \ { \ @@ -148,7 +148,7 @@ __CMPXCHG_DBL(_mb) #undef __CMPXCHG_DBL #define __CMPXCHG_GEN(sfx) \ -static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \ +static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \ unsigned long old, \ unsigned long new, \ int size) \ @@ -255,7 +255,7 @@ __CMPWAIT_CASE( , , 64); #undef __CMPWAIT_CASE #define __CMPWAIT_GEN(sfx) \ -static inline void __cmpwait##sfx(volatile void *ptr, \ +static __always_inline void __cmpwait##sfx(volatile void *ptr, \ unsigned long val, \ int size) \ { \ diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h index 5ab5200b2bdc8e..d48667b04c4117 100644 --- a/arch/arm64/include/asm/ftrace.h +++ b/arch/arm64/include/asm/ftrace.h @@ -14,6 +14,19 @@ #define MCOUNT_ADDR ((unsigned long)_mcount) #define MCOUNT_INSN_SIZE AARCH64_INSN_SIZE +/* + * Currently, gcc tends to save the link register after the local variables + * on the 
stack. This causes the max stack tracer to report the function + * frame sizes for the wrong functions. By defining + * ARCH_FTRACE_SHIFT_STACK_TRACER, it will tell the stack tracer to expect + * to find the return address on the stack after the local variables have + * been set up. + * + * Note, this may change in the future, and we will need to deal with that + * if it were to happen. + */ +#define ARCH_FTRACE_SHIFT_STACK_TRACER 1 + #ifndef __ASSEMBLY__ #include diff --git a/arch/arm64/include/asm/syscall_wrapper.h b/arch/arm64/include/asm/syscall_wrapper.h index 507d0ee6bc6900..06d880b3526c3a 100644 --- a/arch/arm64/include/asm/syscall_wrapper.h +++ b/arch/arm64/include/asm/syscall_wrapper.h @@ -8,6 +8,8 @@ #ifndef __ASM_SYSCALL_WRAPPER_H #define __ASM_SYSCALL_WRAPPER_H +struct pt_regs; + #define SC_ARM64_REGS_TO_ARGS(x, ...) \ __MAP(x,__SC_ARGS \ ,,regs->regs[0],,regs->regs[1],,regs->regs[2] \ @@ -35,8 +37,11 @@ ALLOW_ERROR_INJECTION(__arm64_compat_sys_##sname, ERRNO); \ asmlinkage long __arm64_compat_sys_##sname(const struct pt_regs *__unused) -#define COND_SYSCALL_COMPAT(name) \ - cond_syscall(__arm64_compat_sys_##name); +#define COND_SYSCALL_COMPAT(name) \ + asmlinkage long __weak __arm64_compat_sys_##name(const struct pt_regs *regs) \ + { \ + return sys_ni_syscall(); \ + } #define COMPAT_SYS_NI(name) \ SYSCALL_ALIAS(__arm64_compat_sys_##name, sys_ni_posix_timers); @@ -70,7 +75,11 @@ #endif #ifndef COND_SYSCALL -#define COND_SYSCALL(name) cond_syscall(__arm64_sys_##name) +#define COND_SYSCALL(name) \ + asmlinkage long __weak __arm64_sys_##name(const struct pt_regs *regs) \ + { \ + return sys_ni_syscall(); \ + } #endif #ifndef SYS_NI diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 9711cf73092948..685a3df126cab9 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -19,6 +19,7 @@ config IA64 select PCI_DOMAINS if PCI select PCI_MSI select PCI_SYSCALL if PCI + select HAVE_ASM_MODVERSIONS select HAVE_UNSTABLE_SCHED_CLOCK select HAVE_EXIT_THREAD select HAVE_IDE diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile index e0bb2b6aaa35e9..32240000dc0c85 100644 --- a/arch/ia64/Makefile +++ b/arch/ia64/Makefile @@ -20,7 +20,7 @@ CHECKFLAGS += -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__ OBJCOPYFLAGS := --strip-all LDFLAGS_vmlinux := -static -KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/ia64/module.lds +KBUILD_LDS_MODULE += $(srctree)/arch/ia64/module.lds KBUILD_AFLAGS_KERNEL := -mconstant-gp EXTRA := diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 935599893d3ed1..6663f1741798e8 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -12,6 +12,7 @@ config M68K select DMA_DIRECT_REMAP if HAS_DMA && MMU && !COLDFIRE select HAVE_IDE select HAVE_AOUT if MMU + select HAVE_ASM_MODVERSIONS select HAVE_DEBUG_BUGVERBOSE select GENERIC_IRQ_SHOW select GENERIC_ATOMIC64 diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile index 482513b9af2c5d..5d92883840969b 100644 --- a/arch/m68k/Makefile +++ b/arch/m68k/Makefile @@ -73,7 +73,7 @@ KBUILD_AFLAGS += -D__uClinux__ endif KBUILD_LDFLAGS := -m m68kelf -KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/m68k/kernel/module.lds +KBUILD_LDS_MODULE += $(srctree)/arch/m68k/kernel/module.lds ifdef CONFIG_SUN3 LDFLAGS_vmlinux = -N diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index aff1cadeea4375..904c096fa4da62 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -44,6 +44,7 @@ config MIPS select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT + select 
HAVE_ASM_MODVERSIONS select HAVE_EBPF_JIT if (!CPU_MICROMIPS) select HAVE_CONTEXT_TRACKING select HAVE_COPY_THREAD_TLS diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile index 3ce4dd578370de..528bd73d530a37 100644 --- a/arch/mips/boot/Makefile +++ b/arch/mips/boot/Makefile @@ -160,7 +160,7 @@ targets += vmlinux.lzo.itb quiet_cmd_itb-image = ITB $@ cmd_itb-image = \ env PATH="$(objtree)/scripts/dtc:$(PATH)" \ - $(CONFIG_SHELL) $(MKIMAGE) \ + $(BASH) $(MKIMAGE) \ -D "-I dts -O dtb -p 500 \ --include $(objtree)/arch/mips \ --warning no-unit_address_vs_reg" \ diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 3b77d729057f94..36b834f1c93303 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -60,7 +60,7 @@ KBUILD_CFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY=1 \ -DFTRACE_PATCHABLE_FUNCTION_SIZE=$(NOP_COUNT) CC_FLAGS_FTRACE := -fpatchable-function-entry=$(NOP_COUNT),$(shell echo $$(($(NOP_COUNT)-1))) -KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/parisc/kernel/module.lds +KBUILD_LDS_MODULE += $(srctree)/arch/parisc/kernel/module.lds endif OBJCOPY_FLAGS =-O binary -R .note -R .comment -S diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 891cd23f9c8b05..3e56c9c2f16eed 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -128,14 +128,15 @@ config PPC select ARCH_HAS_HUGEPD if HUGETLB_PAGE select ARCH_HAS_MMIOWB if PPC64 select ARCH_HAS_PHYS_TO_DMA - select ARCH_HAS_PMEM_API if PPC64 + select ARCH_HAS_PMEM_API select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64 select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_MEMBARRIER_CALLBACKS - select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC64 + select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64 select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !RELOCATABLE && !HIBERNATION) select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST - select ARCH_HAS_UACCESS_FLUSHCACHE if PPC64 + select ARCH_HAS_UACCESS_FLUSHCACHE + select ARCH_HAS_UACCESS_MCSAFE if PPC64 select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_KEEP_MEMBLOCK @@ -177,11 +178,13 @@ config PPC select HAVE_ARCH_NVRAM_OPS select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK + select HAVE_ASM_MODVERSIONS select HAVE_C_RECORDMCOUNT select HAVE_CBPF_JIT if !PPC64 select HAVE_STACKPROTECTOR if PPC64 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13) select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2) select HAVE_CONTEXT_TRACKING if PPC64 + select HAVE_COPY_THREAD_TLS select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_STACKOVERFLOW select HAVE_DYNAMIC_FTRACE @@ -567,7 +570,7 @@ config CRASH_DUMP config FA_DUMP bool "Firmware-assisted dump" - depends on PPC64 && PPC_RTAS + depends on PPC64 && (PPC_RTAS || PPC_POWERNV) select CRASH_CORE select CRASH_DUMP help @@ -578,7 +581,26 @@ config FA_DUMP is meant to be a kdump replacement offering robustness and speed not possible without system firmware assistance. - If unsure, say "N" + If unsure, say "y". Only special kernels like petitboot may + need to say "N" here. + +config PRESERVE_FA_DUMP + bool "Preserve Firmware-assisted dump" + depends on PPC64 && PPC_POWERNV && !FA_DUMP + help + On a kernel with FA_DUMP disabled, this option helps to preserve + crash data from a previously crash'ed kernel. Useful when the next + memory preserving kernel boot would process this crash data. + Petitboot kernel is the typical usecase for this option. 
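
As a rough sketch of what the PRESERVE_FA_DUMP case described above amounts to (illustrative only, not part of this patch; it assumes a fw_dump configuration carrying just the minimal fields this series gives it), the preserved region only has to be kept away from the page allocator until the memory-preserving boot consumes it:

        #include <linux/init.h>
        #include <linux/memblock.h>

        /*
         * Minimal sketch, not the code added by this series: with
         * CONFIG_PRESERVE_FA_DUMP the configuration only tracks
         * boot_mem_top and dump_active, and reserving everything below
         * boot_mem_top keeps the previous kernel's crash data intact
         * for the next memory-preserving boot to process.
         */
        static void __init fadump_preserve_crash_data(struct fw_dump *conf)
        {
                if (!conf->dump_active)
                        return;

                memblock_reserve(0, conf->boot_mem_top);
        }
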
+ +config OPAL_CORE + bool "Export OPAL memory as /sys/firmware/opal/core" + depends on PPC64 && PPC_POWERNV + help + This option uses the MPIPL support in firmware to provide an + ELF core of OPAL memory after a crash. The ELF core is exported + as /sys/firmware/opal/core file which is helpful in debugging + OPAL crashes using GDB. config IRQ_ALL_CPUS bool "Distribute interrupts on all CPUs by default" @@ -1139,18 +1161,6 @@ config TASK_SIZE default "0x80000000" if PPC_8xx default "0xc0000000" -config CONSISTENT_SIZE_BOOL - bool "Set custom consistent memory pool size" - depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE - help - This option allows you to set the size of the - consistent memory pool. This pool of virtual memory - is used to make consistent memory allocations. - -config CONSISTENT_SIZE - hex "Size of consistent memory pool" if CONSISTENT_SIZE_BOOL - default "0x00200000" if NOT_COHERENT_CACHE - config PIN_TLB bool "Pinned Kernel TLBs (860 ONLY)" depends on ADVANCED_OPTIONS && PPC_8xx && \ diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 403f7e193833ad..83522c9fc7b66a 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -65,7 +65,7 @@ UTS_MACHINE := $(subst $(space),,$(machine-y)) ifdef CONFIG_PPC32 KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o else -KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/powerpc/kernel/module.lds +KBUILD_LDS_MODULE += $(srctree)/arch/powerpc/kernel/module.lds ifeq ($(call ld-ifversion, -ge, 225000000, y),y) # Have the linker provide sfpr if possible. # There is a corresponding test in arch/powerpc/lib/Makefile @@ -110,7 +110,6 @@ ifeq ($(HAS_BIARCH),y) KBUILD_CFLAGS += -m$(BITS) KBUILD_AFLAGS += -m$(BITS) -Wl,-a$(BITS) KBUILD_LDFLAGS += -m elf$(BITS)$(LDEMULATION) -KBUILD_ARFLAGS += --target=elf$(BITS)-$(GNUTARGET) endif cflags-$(CONFIG_STACKPROTECTOR) += -mstack-protector-guard=tls diff --git a/arch/powerpc/Makefile.postlink b/arch/powerpc/Makefile.postlink index 83f8e5ba2722c2..134f12f89b92b0 100644 --- a/arch/powerpc/Makefile.postlink +++ b/arch/powerpc/Makefile.postlink @@ -18,7 +18,7 @@ quiet_cmd_relocs_check = CHKREL $@ ifdef CONFIG_PPC_BOOK3S_64 cmd_relocs_check = \ $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$@" ; \ - $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/unrel_branch_check.sh "$(OBJDUMP)" "$@" + $(BASH) $(srctree)/arch/powerpc/tools/unrel_branch_check.sh "$(OBJDUMP)" "$@" else cmd_relocs_check = \ $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$@" diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c index 102cc546444d23..a9d209135975da 100644 --- a/arch/powerpc/boot/main.c +++ b/arch/powerpc/boot/main.c @@ -146,6 +146,46 @@ static struct addr_range prep_initrd(struct addr_range vmlinux, void *chosen, return (struct addr_range){(void *)initrd_addr, initrd_size}; } +#ifdef __powerpc64__ +static void prep_esm_blob(struct addr_range vmlinux, void *chosen) +{ + unsigned long esm_blob_addr, esm_blob_size; + + /* Do we have an ESM (Enter Secure Mode) blob? */ + if (_esm_blob_end <= _esm_blob_start) + return; + + printf("Attached ESM blob at 0x%p-0x%p\n\r", + _esm_blob_start, _esm_blob_end); + esm_blob_addr = (unsigned long)_esm_blob_start; + esm_blob_size = _esm_blob_end - _esm_blob_start; + + /* + * If the ESM blob is too low it will be clobbered when the + * kernel relocates to its final location. In this case, + * allocate a safer place and move it. 
+ */ + if (esm_blob_addr < vmlinux.size) { + void *old_addr = (void *)esm_blob_addr; + + printf("Allocating 0x%lx bytes for esm_blob ...\n\r", + esm_blob_size); + esm_blob_addr = (unsigned long)malloc(esm_blob_size); + if (!esm_blob_addr) + fatal("Can't allocate memory for ESM blob !\n\r"); + printf("Relocating ESM blob 0x%lx <- 0x%p (0x%lx bytes)\n\r", + esm_blob_addr, old_addr, esm_blob_size); + memmove((void *)esm_blob_addr, old_addr, esm_blob_size); + } + + /* Tell the kernel ESM blob address via device tree. */ + setprop_val(chosen, "linux,esm-blob-start", (u32)(esm_blob_addr)); + setprop_val(chosen, "linux,esm-blob-end", (u32)(esm_blob_addr + esm_blob_size)); +} +#else +static inline void prep_esm_blob(struct addr_range vmlinux, void *chosen) { } +#endif + /* A buffer that may be edited by tools operating on a zImage binary so as to * edit the command line passed to vmlinux (by setting /chosen/bootargs). * The buffer is put in it's own section so that tools may locate it easier. @@ -214,6 +254,7 @@ void start(void) vmlinux = prep_kernel(); initrd = prep_initrd(vmlinux, chosen, loader_info.initrd_addr, loader_info.initrd_size); + prep_esm_blob(vmlinux, chosen); prep_cmdline(chosen); printf("Finalizing device tree..."); diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h index cd043726ed88c5..e0606766480f75 100644 --- a/arch/powerpc/boot/ops.h +++ b/arch/powerpc/boot/ops.h @@ -251,6 +251,8 @@ extern char _initrd_start[]; extern char _initrd_end[]; extern char _dtb_start[]; extern char _dtb_end[]; +extern char _esm_blob_start[]; +extern char _esm_blob_end[]; static inline __attribute__((const)) int __ilog2_u32(u32 n) diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper index 5148ac271f28f0..ed6266367bc022 100755 --- a/arch/powerpc/boot/wrapper +++ b/arch/powerpc/boot/wrapper @@ -13,6 +13,7 @@ # -i initrd specify initrd file # -d devtree specify device-tree blob # -s tree.dts specify device-tree source file (needs dtc installed) +# -e esm_blob specify ESM blob for secure images # -c cache $kernel.strip.gz (use if present & newer, else make) # -C prefix specify command prefix for cross-building tools # (strip, objcopy, ld) @@ -37,6 +38,7 @@ platform=of initrd= dtb= dts= +esm_blob= cacheit= binary= compression=.gz @@ -60,9 +62,9 @@ tmpdir=. 
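
prep_esm_blob() above publishes the blob location through /chosen properties. A minimal sketch of how the kernel side might pick those up from the flattened device tree follows; the scanner function and the two globals are assumptions for illustration, not code added by this series, and the real consumer may differ:

        #include <linux/init.h>
        #include <linux/of_fdt.h>
        #include <linux/string.h>
        #include <asm/byteorder.h>

        /* Hypothetical globals for the sketch. */
        static u32 esm_blob_start __initdata;
        static u32 esm_blob_end __initdata;

        static int __init early_init_dt_scan_esm_blob(unsigned long node,
                                                      const char *uname,
                                                      int depth, void *data)
        {
                const __be32 *prop;

                if (depth != 1 || strcmp(uname, "chosen") != 0)
                        return 0;

                /* The wrapper wrote both properties as 32-bit values. */
                prop = of_get_flat_dt_prop(node, "linux,esm-blob-start", NULL);
                if (prop)
                        esm_blob_start = be32_to_cpup(prop);

                prop = of_get_flat_dt_prop(node, "linux,esm-blob-end", NULL);
                if (prop)
                        esm_blob_end = be32_to_cpup(prop);

                return 1;       /* stop scanning */
        }

Such a callback would be driven by of_scan_flat_dt() during early boot, in the same way the existing initrd properties set by prep_initrd() are consumed.
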
usage() { echo 'Usage: wrapper [-o output] [-p platform] [-i initrd]' >&2 - echo ' [-d devtree] [-s tree.dts] [-c] [-C cross-prefix]' >&2 - echo ' [-D datadir] [-W workingdir] [-Z (gz|xz|none)]' >&2 - echo ' [--no-compression] [vmlinux]' >&2 + echo ' [-d devtree] [-s tree.dts] [-e esm_blob]' >&2 + echo ' [-c] [-C cross-prefix] [-D datadir] [-W workingdir]' >&2 + echo ' [-Z (gz|xz|none)] [--no-compression] [vmlinux]' >&2 exit 1 } @@ -105,6 +107,11 @@ while [ "$#" -gt 0 ]; do [ "$#" -gt 0 ] || usage dtb="$1" ;; + -e) + shift + [ "$#" -gt 0 ] || usage + esm_blob="$1" + ;; -s) shift [ "$#" -gt 0 ] || usage @@ -218,9 +225,16 @@ objflags=-S tmp=$tmpdir/zImage.$$.o ksection=.kernel:vmlinux.strip isection=.kernel:initrd +esection=.kernel:esm_blob link_address='0x400000' make_space=y + +if [ -n "$esm_blob" -a "$platform" != "pseries" ]; then + echo "ESM blob not support on non-pseries platforms" >&2 + exit 1 +fi + case "$platform" in of) platformo="$object/of.o $object/epapr.o" @@ -477,6 +491,10 @@ if [ -n "$dtb" ]; then fi fi +if [ -n "$esm_blob" ]; then + addsec $tmp "$esm_blob" $esection +fi + if [ "$platform" != "miboot" ]; then if [ -n "$link_address" ] ; then text_start="-Ttext $link_address" diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S index 4ac1e36edfe7f0..a21f3a76e06fce 100644 --- a/arch/powerpc/boot/zImage.lds.S +++ b/arch/powerpc/boot/zImage.lds.S @@ -68,6 +68,14 @@ SECTIONS _initrd_end = .; } + . = ALIGN(4096); + .kernel:esm_blob : + { + _esm_blob_start = .; + *(.kernel:esm_blob) + _esm_blob_end = .; + } + #ifdef CONFIG_PPC64_BOOT_WRAPPER . = ALIGN(256); .got : diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig index 7e6654848531a3..4e6e95f9264607 100644 --- a/arch/powerpc/configs/pmac32_defconfig +++ b/arch/powerpc/configs/pmac32_defconfig @@ -20,7 +20,6 @@ CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_PMAC=y -CONFIG_PPC601_SYNC_FIX=y CONFIG_GEN_RTC=y CONFIG_HIGHMEM=y CONFIG_BINFMT_MISC=m diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig index 34219d555e8af3..6658cceb928c6e 100644 --- a/arch/powerpc/configs/powernv_defconfig +++ b/arch/powerpc/configs/powernv_defconfig @@ -38,7 +38,7 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y CONFIG_PARTITION_ADVANCED=y -CONFIG_SCOM_DEBUGFS=y +# CONFIG_SCOM_DEBUGFS is not set CONFIG_OPAL_PRD=y CONFIG_PPC_MEMTRACE=y # CONFIG_PPC_PSERIES is not set diff --git a/arch/powerpc/configs/ppc40x_defconfig b/arch/powerpc/configs/ppc40x_defconfig index 8f136b52198b6e..a5f683aed3287e 100644 --- a/arch/powerpc/configs/ppc40x_defconfig +++ b/arch/powerpc/configs/ppc40x_defconfig @@ -84,4 +84,3 @@ CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_PCBC=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_DES=y -CONFIG_PPC4xx_OCM=y diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index dc83fefa04f7cd..b250e6f5a7ca7b 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -29,6 +29,7 @@ CONFIG_DTL=y CONFIG_SCANLOG=m CONFIG_PPC_SMLPAR=y CONFIG_IBMEBUS=y +CONFIG_PPC_SVM=y CONFIG_PPC_MAPLE=y CONFIG_PPC_PASEMI=y CONFIG_PPC_PASEMI_IOMMU=y diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index 38abc9c1770a01..26126b4d4de332 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -42,6 +42,7 @@ CONFIG_DTL=y CONFIG_SCANLOG=m CONFIG_PPC_SMLPAR=y 
CONFIG_IBMEBUS=y +CONFIG_PPC_SVM=y # CONFIG_PPC_PMAC is not set CONFIG_RTAS_FLASH=m CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig index 557b530b2f700d..1253482a67c0d9 100644 --- a/arch/powerpc/configs/skiroot_defconfig +++ b/arch/powerpc/configs/skiroot_defconfig @@ -213,6 +213,7 @@ CONFIG_IPMI_WATCHDOG=y CONFIG_HW_RANDOM=y CONFIG_TCG_TPM=y CONFIG_TCG_TIS_I2C_NUVOTON=y +# CONFIG_DEVPORT is not set CONFIG_I2C=y # CONFIG_I2C_COMPAT is not set CONFIG_I2C_CHARDEV=y diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index ec1c97a8e8cbf7..8561498e653cb0 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -15,6 +15,7 @@ #include #include #include +#include #include @@ -34,6 +35,16 @@ extern struct static_key hcall_tracepoint_key; void __trace_hcall_entry(unsigned long opcode, unsigned long *args); void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf); +/* Ultravisor */ +#if defined(CONFIG_PPC_POWERNV) || defined(CONFIG_PPC_SVM) +long ucall_norets(unsigned long opcode, ...); +#else +static inline long ucall_norets(unsigned long opcode, ...) +{ + return U_NOT_AVAILABLE; +} +#endif + /* OPAL */ int64_t __opal_call(int64_t a0, int64_t a1, int64_t a2, int64_t a3, int64_t a4, int64_t a5, int64_t a6, int64_t a7, @@ -123,7 +134,8 @@ extern int __ucmpdi2(u64, u64); /* tracing */ void _mcount(void); -unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip); +unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip, + unsigned long sp); void pnv_power9_force_smt4_catch(void); void pnv_power9_force_smt4_release(void); diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 838de59f6754b8..0796533d37dd52 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -148,23 +148,21 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); */ #include -#ifdef CONFIG_HIGHMEM -#define KVIRT_TOP PKMAP_BASE -#else -#define KVIRT_TOP FIXADDR_START -#endif - /* * ioremap_bot starts at that address. 
Early ioremaps move down from there, * until mem_init() at which point this becomes the top of the vmalloc * and ioremap space */ -#ifdef CONFIG_NOT_COHERENT_CACHE -#define IOREMAP_TOP ((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK) +#ifdef CONFIG_HIGHMEM +#define IOREMAP_TOP PKMAP_BASE #else -#define IOREMAP_TOP KVIRT_TOP +#define IOREMAP_TOP FIXADDR_START #endif +/* PPC32 shares vmalloc area with ioremap */ +#define IOREMAP_START VMALLOC_START +#define IOREMAP_END VMALLOC_END + /* * Just any arbitrary offset to the start of the vmalloc VM area: the * current 16MB value just means that there will be a 64MB "hole" after the @@ -201,8 +199,6 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); #include #include -extern unsigned long ioremap_bot; - /* Bits to mask out from a PGD to get to the PUD page */ #define PGD_MASKED_BITS 0 diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index 23b83d3593e2a5..bb3deb76c951ba 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -206,7 +206,6 @@ extern int mmu_io_psize; void mmu_early_init_devtree(void); void hash__early_init_devtree(void); void radix__early_init_devtree(void); -extern void radix_init_native(void); extern void hash__early_init_mmu(void); extern void radix__early_init_mmu(void); static inline void early_init_mmu(void) @@ -238,9 +237,6 @@ static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base, first_memblock_size); } -extern int (*register_process_table)(unsigned long base, unsigned long page_size, - unsigned long tbl_size); - #ifdef CONFIG_PPC_PSERIES extern void radix_init_pseries(void); #else diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 8308f32e97823a..b01624e5c46714 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -289,7 +289,6 @@ extern unsigned long __kernel_io_end; #define KERN_IO_END __kernel_io_end extern struct page *vmemmap; -extern unsigned long ioremap_bot; extern unsigned long pci_io_base; #endif /* __ASSEMBLY__ */ @@ -317,6 +316,7 @@ extern unsigned long pci_io_base; #define PHB_IO_BASE (ISA_IO_END) #define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE) #define IOREMAP_BASE (PHB_IO_END) +#define IOREMAP_START (ioremap_bot) #define IOREMAP_END (KERN_IO_END) /* Advertise special mapping type for AGP */ @@ -608,8 +608,10 @@ static inline bool pte_access_permitted(pte_t pte, bool write) */ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) { - return __pte((((pte_basic_t)(pfn) << PAGE_SHIFT) & PTE_RPN_MASK) | - pgprot_val(pgprot)); + VM_BUG_ON(pfn >> (64 - PAGE_SHIFT)); + VM_BUG_ON((pfn << PAGE_SHIFT) & ~PTE_RPN_MASK); + + return __pte(((pte_basic_t)pfn << PAGE_SHIFT) | pgprot_val(pgprot)); } static inline unsigned long pte_pfn(pte_t pte) diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h index e04a839cb5b9f9..574eca33f8930d 100644 --- a/arch/powerpc/include/asm/book3s/64/radix.h +++ b/arch/powerpc/include/asm/book3s/64/radix.h @@ -266,9 +266,6 @@ extern void radix__vmemmap_remove_mapping(unsigned long start, extern int radix__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t flags, unsigned int psz); -extern int radix__ioremap_range(unsigned long ea, phys_addr_t pa, - unsigned long size, pgprot_t prot, int nid); - static inline unsigned long radix__get_tree_size(void) { unsigned long 
rts_field; diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h index 05147cecb8df1c..4ce795d3037738 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h @@ -17,8 +17,8 @@ extern void radix__flush_tlb_lpid_page(unsigned int lpid, unsigned long addr, unsigned long page_size); extern void radix__flush_pwc_lpid(unsigned int lpid); -extern void radix__flush_tlb_lpid(unsigned int lpid); -extern void radix__local_flush_tlb_lpid_guest(unsigned int lpid); +extern void radix__flush_all_lpid(unsigned int lpid); +extern void radix__flush_all_lpid_guest(unsigned int lpid); #else static inline void radix__tlbiel_all(unsigned int action) { WARN_ON(1); }; static inline void radix__flush_tlb_lpid_page(unsigned int lpid, @@ -31,11 +31,7 @@ static inline void radix__flush_pwc_lpid(unsigned int lpid) { WARN_ON(1); } -static inline void radix__flush_tlb_lpid(unsigned int lpid) -{ - WARN_ON(1); -} -static inline void radix__local_flush_tlb_lpid_guest(unsigned int lpid) +static inline void radix__flush_all_lpid(unsigned int lpid) { WARN_ON(1); } @@ -73,6 +69,4 @@ extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr); extern void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr); extern void radix__flush_tlb_all(void); -extern void radix__local_flush_tlb_lpid(unsigned int lpid); - #endif diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h index ebf572ea621eec..7aa8195b6cff86 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h @@ -162,4 +162,13 @@ static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long addre radix__flush_tlb_pwc(tlb, address); } + +extern bool tlbie_capable; +extern bool tlbie_enabled; + +static inline bool cputlb_use_tlbie(void) +{ + return tlbie_enabled; +} + #endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */ diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h index 6436b65ac7bc66..0e1263455d737f 100644 --- a/arch/powerpc/include/asm/book3s/pgtable.h +++ b/arch/powerpc/include/asm/book3s/pgtable.h @@ -26,5 +26,16 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot); #define __HAVE_PHYS_MEM_ACCESS_PROT +/* + * This gets called at the end of handling a page fault, when + * the kernel has put a new PTE into the page table for the process. + * We use it to ensure coherency between the i-cache and d-cache + * for the page which has just been mapped in. + * On machines which use an MMU hash table, we use this to put a + * corresponding HPTE into the hash table ahead of time, instead of + * waiting for the inevitable extra hash-table miss exception. + */ +void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep); + #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h index fed7e6241349ca..f47e6ff6554d0f 100644 --- a/arch/powerpc/include/asm/bug.h +++ b/arch/powerpc/include/asm/bug.h @@ -5,14 +5,6 @@ #include -/* - * Define an illegal instr to trap on the bug. - * We don't use 0 because that marks the end of a function - * in the ELF ABI. That's "Boo Boo" in case you wonder... 
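
cputlb_use_tlbie(), added above, lets flush paths choose between the broadcast tlbie instruction and per-CPU tlbiel. A sketch of the intended pattern, with hypothetical helper names standing in for the real radix flush primitives:

        #include <linux/smp.h>
        #include <asm/tlbflush.h>               /* cputlb_use_tlbie() */

        /* Hypothetical stand-ins for the real flush primitives. */
        #define RIC_FLUSH_ALL   2
        void _tlbie_pid(unsigned long pid, unsigned long ric);
        void do_tlbiel_pid(void *info);

        static void flush_process_tlb(unsigned long pid)
        {
                if (cputlb_use_tlbie()) {
                        /* One broadcast tlbie is observed by every CPU. */
                        _tlbie_pid(pid, RIC_FLUSH_ALL);
                } else {
                        /* tlbie disabled: run a local tlbiel flush everywhere. */
                        on_each_cpu(do_tlbiel_pid, &pid, 1);
                }
        }
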
- */ -#define BUG_OPCODE .long 0x00b00b00 /* For asm */ -#define BUG_ILLEGAL_INSTR "0x00b00b00" /* For BUG macro */ - #ifdef CONFIG_BUG #ifdef __ASSEMBLY__ diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index d05f0c28e51595..a1ebcbc3931f2a 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -145,12 +145,10 @@ static inline void cpu_feature_keys_init(void) { } /* Definitions for features that only exist on 32-bit chips */ #ifdef CONFIG_PPC32 -#define CPU_FTR_601 ASM_CONST(0x00001000) #define CPU_FTR_L2CR ASM_CONST(0x00002000) #define CPU_FTR_SPEC7450 ASM_CONST(0x00004000) #define CPU_FTR_TAU ASM_CONST(0x00008000) #define CPU_FTR_CAN_DOZE ASM_CONST(0x00010000) -#define CPU_FTR_USE_RTC ASM_CONST(0x00020000) #define CPU_FTR_L3CR ASM_CONST(0x00040000) #define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x00080000) #define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x00100000) @@ -160,14 +158,12 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTR_NEED_COHERENT ASM_CONST(0x01000000) #define CPU_FTR_NO_BTIC ASM_CONST(0x02000000) #define CPU_FTR_PPC_LE ASM_CONST(0x04000000) -#define CPU_FTR_UNIFIED_ID_CACHE ASM_CONST(0x08000000) #define CPU_FTR_SPE ASM_CONST(0x10000000) #define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x20000000) #define CPU_FTR_INDEXED_DCR ASM_CONST(0x40000000) #else /* CONFIG_PPC32 */ /* Define these to 0 for the sake of tests in common code */ -#define CPU_FTR_601 (0) #define CPU_FTR_PPC_LE (0) #endif @@ -294,8 +290,8 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTR_MAYBE_CAN_NAP 0 #endif -#define CPU_FTRS_PPC601 (CPU_FTR_COMMON | CPU_FTR_601 | \ - CPU_FTR_COHERENT_ICACHE | CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_USE_RTC) +#define CPU_FTRS_PPC601 (CPU_FTR_COMMON | \ + CPU_FTR_COHERENT_ICACHE) #define CPU_FTRS_603 (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE | CPU_FTR_NOEXECUTE) #define CPU_FTRS_604 (CPU_FTR_COMMON | CPU_FTR_PPC_LE) @@ -386,7 +382,7 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTRS_47X (CPU_FTRS_440x6) #define CPU_FTRS_E200 (CPU_FTR_SPE_COMP | \ CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \ - CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE | \ + CPU_FTR_NOEXECUTE | \ CPU_FTR_DEBUG_LVL_EXC) #define CPU_FTRS_E500 (CPU_FTR_MAYBE_CAN_DOZE | \ CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \ @@ -498,7 +494,9 @@ static inline void cpu_feature_keys_init(void) { } #else enum { CPU_FTRS_POSSIBLE = -#ifdef CONFIG_PPC_BOOK3S_32 +#ifdef CONFIG_PPC_BOOK3S_601 + CPU_FTRS_PPC601 | +#elif defined(CONFIG_PPC_BOOK3S_32) CPU_FTRS_PPC601 | CPU_FTRS_603 | CPU_FTRS_604 | CPU_FTRS_740_NOTAU | CPU_FTRS_740 | CPU_FTRS_750 | CPU_FTRS_750FX1 | CPU_FTRS_750FX2 | CPU_FTRS_750FX | CPU_FTRS_750GX | @@ -574,8 +572,10 @@ enum { #else enum { CPU_FTRS_ALWAYS = -#ifdef CONFIG_PPC_BOOK3S_32 - CPU_FTRS_PPC601 & CPU_FTRS_603 & CPU_FTRS_604 & CPU_FTRS_740_NOTAU & +#ifdef CONFIG_PPC_BOOK3S_601 + CPU_FTRS_PPC601 & +#elif defined(CONFIG_PPC_BOOK3S_32) + CPU_FTRS_603 & CPU_FTRS_604 & CPU_FTRS_740_NOTAU & CPU_FTRS_740 & CPU_FTRS_750 & CPU_FTRS_750FX1 & CPU_FTRS_750FX2 & CPU_FTRS_750FX & CPU_FTRS_750GX & CPU_FTRS_7400_NOTAU & CPU_FTRS_7400 & CPU_FTRS_7450_20 & diff --git a/arch/powerpc/include/asm/current.h b/arch/powerpc/include/asm/current.h index 297827b761697a..bbfb9480041552 100644 --- a/arch/powerpc/include/asm/current.h +++ b/arch/powerpc/include/asm/current.h @@ -16,7 +16,8 @@ static inline struct task_struct *get_current(void) { 
struct task_struct *task; - __asm__ __volatile__("ld %0,%1(13)" + /* get_current can be cached by the compiler, so no volatile */ + asm ("ld %0,%1(13)" : "=r" (task) : "i" (offsetof(struct paca_struct, __current))); diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index 8aa7c76c2130f5..6f9b2a12540a28 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -88,6 +88,19 @@ struct eeh_pe { struct list_head child_list; /* List of PEs below this PE */ struct list_head child; /* Memb. child_list/eeh_phb_pe */ struct list_head edevs; /* List of eeh_dev in this PE */ + +#ifdef CONFIG_STACKTRACE + /* + * Saved stack trace. When we find a PE freeze in eeh_dev_check_failure + * the stack trace is saved here so we can print it in the recovery + * thread if it turns out to due to a real problem rather than + * a hot-remove. + * + * A max of 64 entries might be overkill, but it also might not be. + */ + unsigned long stack_trace[64]; + int trace_entries; +#endif /* CONFIG_STACKTRACE */ }; #define eeh_pe_for_each_dev(pe, edev, tmp) \ @@ -121,6 +134,8 @@ static inline bool eeh_pe_passed(struct eeh_pe *pe) struct eeh_dev { int mode; /* EEH mode */ int class_code; /* Class code of the device */ + int bdfn; /* bdfn of device (for cfg ops) */ + struct pci_controller *controller; int pe_config_addr; /* PE config address */ u32 config_space[16]; /* Saved PCI config space */ int pcix_cap; /* Saved PCIx capability */ @@ -136,6 +151,17 @@ struct eeh_dev { struct pci_dev *physfn; /* Associated SRIOV PF */ }; +/* "fmt" must be a simple literal string */ +#define EEH_EDEV_PRINT(level, edev, fmt, ...) \ + pr_##level("PCI %04x:%02x:%02x.%x#%04x: EEH: " fmt, \ + (edev)->controller->global_number, PCI_BUSNO((edev)->bdfn), \ + PCI_SLOT((edev)->bdfn), PCI_FUNC((edev)->bdfn), \ + ((edev)->pe ? (edev)->pe_config_addr : 0xffff), ##__VA_ARGS__) +#define eeh_edev_dbg(edev, fmt, ...) EEH_EDEV_PRINT(debug, (edev), fmt, ##__VA_ARGS__) +#define eeh_edev_info(edev, fmt, ...) EEH_EDEV_PRINT(info, (edev), fmt, ##__VA_ARGS__) +#define eeh_edev_warn(edev, fmt, ...) EEH_EDEV_PRINT(warn, (edev), fmt, ##__VA_ARGS__) +#define eeh_edev_err(edev, fmt, ...) EEH_EDEV_PRINT(err, (edev), fmt, ##__VA_ARGS__) + static inline struct pci_dn *eeh_dev_to_pdn(struct eeh_dev *edev) { return edev ? 
edev->pdn : NULL; @@ -247,7 +273,7 @@ static inline bool eeh_state_active(int state) == (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); } -typedef void *(*eeh_edev_traverse_func)(struct eeh_dev *edev, void *flag); +typedef void (*eeh_edev_traverse_func)(struct eeh_dev *edev, void *flag); typedef void *(*eeh_pe_traverse_func)(struct eeh_pe *pe, void *flag); void eeh_set_pe_aux_size(int size); int eeh_phb_pe_create(struct pci_controller *phb); @@ -261,20 +287,20 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev); void eeh_pe_update_time_stamp(struct eeh_pe *pe); void *eeh_pe_traverse(struct eeh_pe *root, eeh_pe_traverse_func fn, void *flag); -void *eeh_pe_dev_traverse(struct eeh_pe *root, - eeh_edev_traverse_func fn, void *flag); +void eeh_pe_dev_traverse(struct eeh_pe *root, + eeh_edev_traverse_func fn, void *flag); void eeh_pe_restore_bars(struct eeh_pe *pe); const char *eeh_pe_loc_get(struct eeh_pe *pe); struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe); struct eeh_dev *eeh_dev_init(struct pci_dn *pdn); void eeh_dev_phb_init_dynamic(struct pci_controller *phb); -void eeh_probe_devices(void); +void eeh_show_enabled(void); int __init eeh_ops_register(struct eeh_ops *ops); int __exit eeh_ops_unregister(const char *name); int eeh_check_failure(const volatile void __iomem *token); int eeh_dev_check_failure(struct eeh_dev *edev); -void eeh_addr_cache_build(void); +void eeh_addr_cache_init(void); void eeh_add_device_early(struct pci_dn *); void eeh_add_device_tree_early(struct pci_dn *); void eeh_add_device_late(struct pci_dev *); @@ -316,7 +342,7 @@ static inline bool eeh_enabled(void) return false; } -static inline void eeh_probe_devices(void) { } +static inline void eeh_show_enabled(void) { } static inline void *eeh_dev_init(struct pci_dn *pdn, void *data) { @@ -332,7 +358,7 @@ static inline int eeh_check_failure(const volatile void __iomem *token) #define eeh_dev_check_failure(x) (0) -static inline void eeh_addr_cache_build(void) { } +static inline void eeh_addr_cache_init(void) { } static inline void eeh_add_device_early(struct pci_dn *pdn) { } diff --git a/arch/powerpc/include/asm/elfnote.h b/arch/powerpc/include/asm/elfnote.h new file mode 100644 index 00000000000000..a201b6e9ae4495 --- /dev/null +++ b/arch/powerpc/include/asm/elfnote.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * PowerPC ELF notes. + * + * Copyright 2019, IBM Corporation + */ + +#ifndef __ASM_POWERPC_ELFNOTE_H__ +#define __ASM_POWERPC_ELFNOTE_H__ + +/* + * These note types should live in a SHT_NOTE segment and have + * "PowerPC" in the name field. + */ + +/* + * The capabilities supported/required by this kernel (bitmap). + * + * This type uses a bitmap as "desc" field. Each bit is described + * in arch/powerpc/kernel/note.S + */ +#define PPC_ELFNOTE_CAPABILITIES 1 + +#endif /* __ASM_POWERPC_ELFNOTE_H__ */ diff --git a/arch/powerpc/include/asm/fadump-internal.h b/arch/powerpc/include/asm/fadump-internal.h new file mode 100644 index 00000000000000..c814a2b5538932 --- /dev/null +++ b/arch/powerpc/include/asm/fadump-internal.h @@ -0,0 +1,169 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Firmware-Assisted Dump internal code. + * + * Copyright 2011, Mahesh Salgaonkar, IBM Corporation. + * Copyright 2019, Hari Bathini, IBM Corporation. 
+ */ + +#ifndef _ASM_POWERPC_FADUMP_INTERNAL_H +#define _ASM_POWERPC_FADUMP_INTERNAL_H + +/* Maximum number of memory regions kernel supports */ +#define FADUMP_MAX_MEM_REGS 128 + +#ifndef CONFIG_PRESERVE_FA_DUMP + +/* The upper limit percentage for user specified boot memory size (25%) */ +#define MAX_BOOT_MEM_RATIO 4 + +#define memblock_num_regions(memblock_type) (memblock.memblock_type.cnt) + +/* Alignment per CMA requirement. */ +#define FADUMP_CMA_ALIGNMENT (PAGE_SIZE << \ + max_t(unsigned long, MAX_ORDER - 1, \ + pageblock_order)) + +/* FAD commands */ +#define FADUMP_REGISTER 1 +#define FADUMP_UNREGISTER 2 +#define FADUMP_INVALIDATE 3 + +/* + * Copy the ascii values for first 8 characters from a string into u64 + * variable at their respective indexes. + * e.g. + * The string "FADMPINF" will be converted into 0x4641444d50494e46 + */ +static inline u64 fadump_str_to_u64(const char *str) +{ + u64 val = 0; + int i; + + for (i = 0; i < sizeof(val); i++) + val = (*str) ? (val << 8) | *str++ : val << 8; + return val; +} + +#define FADUMP_CPU_UNKNOWN (~((u32)0)) + +#define FADUMP_CRASH_INFO_MAGIC fadump_str_to_u64("FADMPINF") + +/* fadump crash info structure */ +struct fadump_crash_info_header { + u64 magic_number; + u64 elfcorehdr_addr; + u32 crashing_cpu; + struct pt_regs regs; + struct cpumask online_mask; +}; + +struct fadump_memory_range { + u64 base; + u64 size; +}; + +/* fadump memory ranges info */ +struct fadump_mrange_info { + char name[16]; + struct fadump_memory_range *mem_ranges; + u32 mem_ranges_sz; + u32 mem_range_cnt; + u32 max_mem_ranges; +}; + +/* Platform specific callback functions */ +struct fadump_ops; + +/* Firmware-assisted dump configuration details. */ +struct fw_dump { + unsigned long reserve_dump_area_start; + unsigned long reserve_dump_area_size; + /* cmd line option during boot */ + unsigned long reserve_bootvar; + + unsigned long cpu_state_data_size; + u64 cpu_state_dest_vaddr; + u32 cpu_state_data_version; + u32 cpu_state_entry_size; + + unsigned long hpte_region_size; + + unsigned long boot_memory_size; + u64 boot_mem_dest_addr; + u64 boot_mem_addr[FADUMP_MAX_MEM_REGS]; + u64 boot_mem_sz[FADUMP_MAX_MEM_REGS]; + u64 boot_mem_top; + u64 boot_mem_regs_cnt; + + unsigned long fadumphdr_addr; + unsigned long cpu_notes_buf_vaddr; + unsigned long cpu_notes_buf_size; + + /* + * Maximum size supported by firmware to copy from source to + * destination address per entry. 
+ */ + u64 max_copy_size; + u64 kernel_metadata; + + int ibm_configure_kernel_dump; + + unsigned long fadump_enabled:1; + unsigned long fadump_supported:1; + unsigned long dump_active:1; + unsigned long dump_registered:1; + unsigned long nocma:1; + + struct fadump_ops *ops; +}; + +struct fadump_ops { + u64 (*fadump_init_mem_struct)(struct fw_dump *fadump_conf); + u64 (*fadump_get_metadata_size)(void); + int (*fadump_setup_metadata)(struct fw_dump *fadump_conf); + u64 (*fadump_get_bootmem_min)(void); + int (*fadump_register)(struct fw_dump *fadump_conf); + int (*fadump_unregister)(struct fw_dump *fadump_conf); + int (*fadump_invalidate)(struct fw_dump *fadump_conf); + void (*fadump_cleanup)(struct fw_dump *fadump_conf); + int (*fadump_process)(struct fw_dump *fadump_conf); + void (*fadump_region_show)(struct fw_dump *fadump_conf, + struct seq_file *m); + void (*fadump_trigger)(struct fadump_crash_info_header *fdh, + const char *msg); +}; + +/* Helper functions */ +s32 fadump_setup_cpu_notes_buf(u32 num_cpus); +void fadump_free_cpu_notes_buf(void); +u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs); +void fadump_update_elfcore_header(char *bufp); +bool is_fadump_boot_mem_contiguous(void); +bool is_fadump_reserved_mem_contiguous(void); + +#else /* !CONFIG_PRESERVE_FA_DUMP */ + +/* Firmware-assisted dump configuration details. */ +struct fw_dump { + u64 boot_mem_top; + u64 dump_active; +}; + +#endif /* CONFIG_PRESERVE_FA_DUMP */ + +#ifdef CONFIG_PPC_PSERIES +extern void rtas_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node); +#else +static inline void +rtas_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) { } +#endif + +#ifdef CONFIG_PPC_POWERNV +extern void opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node); +#else +static inline void +opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) { } +#endif + +#endif /* _ASM_POWERPC_FADUMP_INTERNAL_H */ diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h index 17d9b6acaf6385..526a6a6473128a 100644 --- a/arch/powerpc/include/asm/fadump.h +++ b/arch/powerpc/include/asm/fadump.h @@ -6,196 +6,14 @@ * Author: Mahesh Salgaonkar */ -#ifndef __PPC64_FA_DUMP_H__ -#define __PPC64_FA_DUMP_H__ +#ifndef _ASM_POWERPC_FADUMP_H +#define _ASM_POWERPC_FADUMP_H #ifdef CONFIG_FA_DUMP -/* - * The RMA region will be saved for later dumping when kernel crashes. - * RMA is Real Mode Area, the first block of logical memory address owned - * by logical partition, containing the storage that may be accessed with - * translate off. - */ -#define RMA_START 0x0 -#define RMA_END (ppc64_rma_size) - -/* - * On some Power systems where RMO is 128MB, it still requires minimum of - * 256MB for kernel to boot successfully. When kdump infrastructure is - * configured to save vmcore over network, we run into OOM issue while - * loading modules related to network setup. Hence we need aditional 64M - * of memory to avoid OOM issue. - */ -#define MIN_BOOT_MEM (((RMA_END < (0x1UL << 28)) ? (0x1UL << 28) : RMA_END) \ - + (0x1UL << 26)) - -/* The upper limit percentage for user specified boot memory size (25%) */ -#define MAX_BOOT_MEM_RATIO 4 - -#define memblock_num_regions(memblock_type) (memblock.memblock_type.cnt) - -/* Alignement per CMA requirement. 
*/ -#define FADUMP_CMA_ALIGNMENT (PAGE_SIZE << \ - max_t(unsigned long, MAX_ORDER - 1, pageblock_order)) - -/* Firmware provided dump sections */ -#define FADUMP_CPU_STATE_DATA 0x0001 -#define FADUMP_HPTE_REGION 0x0002 -#define FADUMP_REAL_MODE_REGION 0x0011 - -/* Dump request flag */ -#define FADUMP_REQUEST_FLAG 0x00000001 - -/* FAD commands */ -#define FADUMP_REGISTER 1 -#define FADUMP_UNREGISTER 2 -#define FADUMP_INVALIDATE 3 - -/* Dump status flag */ -#define FADUMP_ERROR_FLAG 0x2000 - -#define FADUMP_CPU_ID_MASK ((1UL << 32) - 1) - -#define CPU_UNKNOWN (~((u32)0)) - -/* Utility macros */ -#define SKIP_TO_NEXT_CPU(reg_entry) \ -({ \ - while (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUEND")) \ - reg_entry++; \ - reg_entry++; \ -}) - extern int crashing_cpu; -/* Kernel Dump section info */ -struct fadump_section { - __be32 request_flag; - __be16 source_data_type; - __be16 error_flags; - __be64 source_address; - __be64 source_len; - __be64 bytes_dumped; - __be64 destination_address; -}; - -/* ibm,configure-kernel-dump header. */ -struct fadump_section_header { - __be32 dump_format_version; - __be16 dump_num_sections; - __be16 dump_status_flag; - __be32 offset_first_dump_section; - - /* Fields for disk dump option. */ - __be32 dd_block_size; - __be64 dd_block_offset; - __be64 dd_num_blocks; - __be32 dd_offset_disk_path; - - /* Maximum time allowed to prevent an automatic dump-reboot. */ - __be32 max_time_auto; -}; - -/* - * Firmware Assisted dump memory structure. This structure is required for - * registering future kernel dump with power firmware through rtas call. - * - * No disk dump option. Hence disk dump path string section is not included. - */ -struct fadump_mem_struct { - struct fadump_section_header header; - - /* Kernel dump sections */ - struct fadump_section cpu_state_data; - struct fadump_section hpte_region; - struct fadump_section rmr_region; -}; - -/* Firmware-assisted dump configuration details. */ -struct fw_dump { - unsigned long cpu_state_data_size; - unsigned long hpte_region_size; - unsigned long boot_memory_size; - unsigned long reserve_dump_area_start; - unsigned long reserve_dump_area_size; - /* cmd line option during boot */ - unsigned long reserve_bootvar; - - unsigned long fadumphdr_addr; - unsigned long cpu_notes_buf; - unsigned long cpu_notes_buf_size; - - int ibm_configure_kernel_dump; - - unsigned long fadump_enabled:1; - unsigned long fadump_supported:1; - unsigned long dump_active:1; - unsigned long dump_registered:1; - unsigned long nocma:1; -}; - -/* - * Copy the ascii values for first 8 characters from a string into u64 - * variable at their respective indexes. - * e.g. - * The string "FADMPINF" will be converted into 0x4641444d50494e46 - */ -static inline u64 str_to_u64(const char *str) -{ - u64 val = 0; - int i; - - for (i = 0; i < sizeof(val); i++) - val = (*str) ? (val << 8) | *str++ : val << 8; - return val; -} -#define STR_TO_HEX(x) str_to_u64(x) -#define REG_ID(x) str_to_u64(x) - -#define FADUMP_CRASH_INFO_MAGIC STR_TO_HEX("FADMPINF") -#define REGSAVE_AREA_MAGIC STR_TO_HEX("REGSAVE") - -/* The firmware-assisted dump format. - * - * The register save area is an area in the partition's memory used to preserve - * the register contents (CPU state data) for the active CPUs during a firmware - * assisted dump. The dump format contains register save area header followed - * by register entries. Each list of registers for a CPU starts with - * "CPUSTRT" and ends with "CPUEND". - */ - -/* Register save area header. 
*/ -struct fadump_reg_save_area_header { - __be64 magic_number; - __be32 version; - __be32 num_cpu_offset; -}; - -/* Register entry. */ -struct fadump_reg_entry { - __be64 reg_id; - __be64 reg_value; -}; - -/* fadump crash info structure */ -struct fadump_crash_info_header { - u64 magic_number; - u64 elfcorehdr_addr; - u32 crashing_cpu; - struct pt_regs regs; - struct cpumask online_mask; -}; - -struct fad_crash_memory_ranges { - unsigned long long base; - unsigned long long size; -}; - extern int is_fadump_memory_area(u64 addr, ulong size); -extern int early_init_dt_scan_fw_dump(unsigned long node, - const char *uname, int depth, void *data); -extern int fadump_reserve_mem(void); extern int setup_fadump(void); extern int is_fadump_active(void); extern int should_fadump_crash(void); @@ -207,5 +25,11 @@ static inline int is_fadump_active(void) { return 0; } static inline int should_fadump_crash(void) { return 0; } static inline void crash_fadump(struct pt_regs *regs, const char *str) { } static inline void fadump_cleanup(void) { } +#endif /* !CONFIG_FA_DUMP */ + +#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP) +extern int early_init_dt_scan_fw_dump(unsigned long node, const char *uname, + int depth, void *data); +extern int fadump_reserve_mem(void); #endif -#endif +#endif /* _ASM_POWERPC_FADUMP_H */ diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h index faeca8b76c8cf9..b3e214a97f3ad5 100644 --- a/arch/powerpc/include/asm/firmware.h +++ b/arch/powerpc/include/asm/firmware.h @@ -50,6 +50,7 @@ #define FW_FEATURE_DRC_INFO ASM_CONST(0x0000000800000000) #define FW_FEATURE_BLOCK_REMOVE ASM_CONST(0x0000001000000000) #define FW_FEATURE_PAPR_SCM ASM_CONST(0x0000002000000000) +#define FW_FEATURE_ULTRAVISOR ASM_CONST(0x0000004000000000) #ifndef __ASSEMBLY__ @@ -68,9 +69,9 @@ enum { FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN | FW_FEATURE_HPT_RESIZE | FW_FEATURE_DRMEM_V2 | FW_FEATURE_DRC_INFO | FW_FEATURE_BLOCK_REMOVE | - FW_FEATURE_PAPR_SCM, + FW_FEATURE_PAPR_SCM | FW_FEATURE_ULTRAVISOR, FW_FEATURE_PSERIES_ALWAYS = 0, - FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL, + FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_ULTRAVISOR, FW_FEATURE_POWERNV_ALWAYS = 0, FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h index 3dfb80b8656140..f54a08a2cd7092 100644 --- a/arch/powerpc/include/asm/ftrace.h +++ b/arch/powerpc/include/asm/ftrace.h @@ -8,6 +8,8 @@ #define MCOUNT_ADDR ((unsigned long)(_mcount)) #define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ +#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR + #ifdef __ASSEMBLY__ /* Based off of objdump optput from glibc */ diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h index 3a6aa57b9d9010..eea28ca679dbbb 100644 --- a/arch/powerpc/include/asm/futex.h +++ b/arch/powerpc/include/asm/futex.h @@ -60,8 +60,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, pagefault_enable(); - if (!ret) - *oval = oldval; + *oval = oldval; prevent_write_to_user(uaddr, sizeof(*uaddr)); return ret; diff --git a/arch/powerpc/include/asm/head-64.h b/arch/powerpc/include/asm/head-64.h index a466765709a9bb..2dabcf66829292 100644 --- a/arch/powerpc/include/asm/head-64.h +++ b/arch/powerpc/include/asm/head-64.h @@ -169,47 +169,6 @@ end_##sname: #define ABS_ADDR(label) (label - fs_label + fs_start) -#define 
EXC_REAL_BEGIN(name, start, size) \ - FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##name, start, size) - -#define EXC_REAL_END(name, start, size) \ - FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##name, start, size) - -#define EXC_VIRT_BEGIN(name, start, size) \ - FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size) - -#define EXC_VIRT_END(name, start, size) \ - FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size) - -#define EXC_COMMON_BEGIN(name) \ - USE_TEXT_SECTION(); \ - .balign IFETCH_ALIGN_BYTES; \ - .global name; \ - _ASM_NOKPROBE_SYMBOL(name); \ - DEFINE_FIXED_SYMBOL(name); \ -name: - -#define TRAMP_REAL_BEGIN(name) \ - FIXED_SECTION_ENTRY_BEGIN(real_trampolines, name) - -#define TRAMP_VIRT_BEGIN(name) \ - FIXED_SECTION_ENTRY_BEGIN(virt_trampolines, name) - -#ifdef CONFIG_KVM_BOOK3S_64_HANDLER -#define TRAMP_KVM_BEGIN(name) \ - TRAMP_VIRT_BEGIN(name) -#else -#define TRAMP_KVM_BEGIN(name) -#endif - -#define EXC_REAL_NONE(start, size) \ - FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##unused, start, size); \ - FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##unused, start, size) - -#define EXC_VIRT_NONE(start, size) \ - FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size); \ - FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size) - #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_HEAD_64_H */ diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index 20a101046cffc3..bd6504c28c2f20 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -31,9 +31,6 @@ static inline int is_hugepage_only_range(struct mm_struct *mm, return 0; } -void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, - pte_t pte); - #define __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, diff --git a/arch/powerpc/include/asm/io-workarounds.h b/arch/powerpc/include/asm/io-workarounds.h index 01567ea4ceaf7b..3cce499fbe27a2 100644 --- a/arch/powerpc/include/asm/io-workarounds.h +++ b/arch/powerpc/include/asm/io-workarounds.h @@ -8,6 +8,7 @@ #ifndef _IO_WORKAROUNDS_H #define _IO_WORKAROUNDS_H +#ifdef CONFIG_PPC_IO_WORKAROUNDS #include #include @@ -32,4 +33,23 @@ extern int spiderpci_iowa_init(struct iowa_bus *, void *); #define SPIDER_PCI_DUMMY_READ 0x0810 #define SPIDER_PCI_DUMMY_READ_BASE 0x0814 +#endif + +#if defined(CONFIG_PPC_IO_WORKAROUNDS) && defined(CONFIG_PPC_INDIRECT_MMIO) +extern bool io_workaround_inited; + +static inline bool iowa_is_active(void) +{ + return unlikely(io_workaround_inited); +} +#else +static inline bool iowa_is_active(void) +{ + return false; +} +#endif + +void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size, + pgprot_t prot, void *caller); + #endif /* _IO_WORKAROUNDS_H */ diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index 23e5d5d16c7e77..a63ec938636de5 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -705,16 +705,9 @@ static inline void iosync(void) * create hand-made mappings for use only by the PCI code and cannot * currently be hooked. Must be page aligned. 
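
Given iowa_is_active() and iowa_ioremap() above, a generic ioremap() front end can route through the I/O-workaround machinery without the old machdep hooks. A sketch under that assumption, not necessarily the exact code this series adds elsewhere:

        #include <asm/io.h>
        #include <asm/io-workarounds.h>

        /*
         * Sketch only: divert to the workaround implementation when it
         * is active, otherwise take the normal __ioremap_caller() path.
         */
        void __iomem *ioremap(phys_addr_t addr, unsigned long size)
        {
                pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
                void *caller = __builtin_return_address(0);

                if (iowa_is_active())
                        return iowa_ioremap(addr, size, prot, caller);

                return __ioremap_caller(addr, size, prot, caller);
        }

Because iowa_is_active() collapses to a constant false unless both CONFIG_PPC_IO_WORKAROUNDS and CONFIG_PPC_INDIRECT_MMIO are set, the extra branch costs nothing on platforms that never use the workarounds.
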
* - * * __ioremap is the low level implementation used by ioremap and - * ioremap_prot and cannot be hooked (but can be used by a hook on one - * of the previous ones) - * * * __ioremap_caller is the same as above but takes an explicit caller * reference rather than using __builtin_return_address(0) * - * * __iounmap, is the low level implementation used by iounmap and cannot - * be hooked (but can be used by a hook on iounmap) - * */ extern void __iomem *ioremap(phys_addr_t address, unsigned long size); extern void __iomem *ioremap_prot(phys_addr_t address, unsigned long size, @@ -729,13 +722,14 @@ void __iomem *ioremap_coherent(phys_addr_t address, unsigned long size); extern void iounmap(volatile void __iomem *addr); -extern void __iomem *__ioremap(phys_addr_t, unsigned long size, - unsigned long flags); +int early_ioremap_range(unsigned long ea, phys_addr_t pa, + unsigned long size, pgprot_t prot); +void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size, + pgprot_t prot, void *caller); + extern void __iomem *__ioremap_caller(phys_addr_t, unsigned long size, pgprot_t prot, void *caller); -extern void __iounmap(volatile void __iomem *addr); - extern void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot); extern void __iounmap_at(void *ea, unsigned long size); diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index 18d342b815e4c0..350101e11ddb4a 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -48,15 +48,16 @@ struct iommu_table_ops { * returns old TCE and DMA direction mask. * @tce is a physical address. */ - int (*exchange)(struct iommu_table *tbl, + int (*xchg_no_kill)(struct iommu_table *tbl, long index, unsigned long *hpa, - enum dma_data_direction *direction); - /* Real mode */ - int (*exchange_rm)(struct iommu_table *tbl, - long index, - unsigned long *hpa, - enum dma_data_direction *direction); + enum dma_data_direction *direction, + bool realmode); + + void (*tce_kill)(struct iommu_table *tbl, + unsigned long index, + unsigned long pages, + bool realmode); __be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc); #endif @@ -111,6 +112,8 @@ struct iommu_table { struct iommu_table_ops *it_ops; struct kref it_kref; int it_nid; + unsigned long it_reserved_start; /* Start of not-DMA-able (MMIO) area */ + unsigned long it_reserved_end; }; #define IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry) \ @@ -149,8 +152,9 @@ extern int iommu_tce_table_put(struct iommu_table *tbl); /* Initializes an iommu_table based in values set in the passed-in * structure */ -extern struct iommu_table *iommu_init_table(struct iommu_table * tbl, - int nid); +extern struct iommu_table *iommu_init_table(struct iommu_table *tbl, + int nid, unsigned long res_start, unsigned long res_end); + #define IOMMU_TABLE_GROUP_MAX_TABLES 2 struct iommu_table_group; @@ -206,6 +210,12 @@ extern void iommu_del_device(struct device *dev); extern long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl, unsigned long entry, unsigned long *hpa, enum dma_data_direction *direction); +extern long iommu_tce_xchg_no_kill(struct mm_struct *mm, + struct iommu_table *tbl, + unsigned long entry, unsigned long *hpa, + enum dma_data_direction *direction); +extern void iommu_tce_kill(struct iommu_table *tbl, + unsigned long entry, unsigned long pages); #else static inline void iommu_register_group(struct iommu_table_group *table_group, int pci_domain_number, diff --git 
a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 6fb5fb4779e0dc..6fe6ad64cba576 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -297,6 +297,7 @@ struct kvm_arch { cpumask_t cpu_in_guest; u8 radix; u8 fwnmi_enabled; + u8 secure_guest; bool threads_indep; bool nested_enable; pgd_t *pgtable; diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index c43d6eca9eddef..7bcb64444a3943 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -3,9 +3,6 @@ #define _ASM_POWERPC_MACHDEP_H #ifdef __KERNEL__ -/* - */ - #include #include #include @@ -31,10 +28,6 @@ struct pci_host_bridge; struct machdep_calls { char *name; #ifdef CONFIG_PPC64 - void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size, - pgprot_t prot, void *caller); - void (*iounmap)(volatile void __iomem *token); - #ifdef CONFIG_PM void (*iommu_save)(void); void (*iommu_restore)(void); diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h index a4c6a74ad2fb7a..6a6ddaabdb34db 100644 --- a/arch/powerpc/include/asm/mce.h +++ b/arch/powerpc/include/asm/mce.h @@ -30,6 +30,10 @@ enum MCE_Disposition { enum MCE_Initiator { MCE_INITIATOR_UNKNOWN = 0, MCE_INITIATOR_CPU = 1, + MCE_INITIATOR_PCI = 2, + MCE_INITIATOR_ISA = 3, + MCE_INITIATOR_MEMORY= 4, + MCE_INITIATOR_POWERMGM = 5, }; enum MCE_ErrorType { @@ -41,6 +45,8 @@ enum MCE_ErrorType { MCE_ERROR_TYPE_USER = 5, MCE_ERROR_TYPE_RA = 6, MCE_ERROR_TYPE_LINK = 7, + MCE_ERROR_TYPE_DCACHE = 8, + MCE_ERROR_TYPE_ICACHE = 9, }; enum MCE_ErrorClass { @@ -122,7 +128,8 @@ struct machine_check_event { enum MCE_UeErrorType ue_error_type:8; u8 effective_address_provided; u8 physical_address_provided; - u8 reserved_1[5]; + u8 ignore_event; + u8 reserved_1[4]; u64 effective_address; u64 physical_address; u8 reserved_2[8]; @@ -193,6 +200,7 @@ struct mce_error_info { enum MCE_Initiator initiator:8; enum MCE_ErrorClass error_class:8; bool sync_error; + bool ignore_event; }; #define MAX_MC_EVT 100 diff --git a/arch/powerpc/include/asm/mem_encrypt.h b/arch/powerpc/include/asm/mem_encrypt.h new file mode 100644 index 00000000000000..ba9dab07c1bed4 --- /dev/null +++ b/arch/powerpc/include/asm/mem_encrypt.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * SVM helper functions + * + * Copyright 2018 IBM Corporation + */ + +#ifndef _ASM_POWERPC_MEM_ENCRYPT_H +#define _ASM_POWERPC_MEM_ENCRYPT_H + +#include + +static inline bool mem_encrypt_active(void) +{ + return is_secure_guest(); +} + +static inline bool force_dma_unencrypted(struct device *dev) +{ + return is_secure_guest(); +} + +int set_memory_encrypted(unsigned long addr, int numpages); +int set_memory_decrypted(unsigned long addr, int numpages); + +#endif /* _ASM_POWERPC_MEM_ENCRYPT_H */ diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index ba94ce8c22d7b7..0699cfeeb8c948 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -257,7 +257,7 @@ extern void radix__mmu_cleanup_all(void); /* Functions for creating and updating partition table on POWER9 */ extern void mmu_partition_table_init(void); extern void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0, - unsigned long dw1); + unsigned long dw1, bool flush); #endif /* CONFIG_PPC64 */ struct mm_struct; diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 0284f8f5305f8a..552b96eef0c8ed 100644 
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -11,8 +11,6 @@ #include /* For sub-arch specific PPC_PIN_SIZE */ #include -extern unsigned long ioremap_bot; - #ifdef CONFIG_44x extern int icache_44x_need_flush; #endif @@ -78,23 +76,21 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); */ #include -#ifdef CONFIG_HIGHMEM -#define KVIRT_TOP PKMAP_BASE -#else -#define KVIRT_TOP FIXADDR_START -#endif - /* * ioremap_bot starts at that address. Early ioremaps move down from there, * until mem_init() at which point this becomes the top of the vmalloc * and ioremap space */ -#ifdef CONFIG_NOT_COHERENT_CACHE -#define IOREMAP_TOP ((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK) +#ifdef CONFIG_HIGHMEM +#define IOREMAP_TOP PKMAP_BASE #else -#define IOREMAP_TOP KVIRT_TOP +#define IOREMAP_TOP FIXADDR_START #endif +/* PPC32 shares vmalloc area with ioremap */ +#define IOREMAP_START VMALLOC_START +#define IOREMAP_END VMALLOC_END + /* * Just any arbitrary offset to the start of the vmalloc VM area: the * current 16MB value just means that there will be a 64MB "hole" after the diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h index b9f66cf15c310a..9a33b8bd842d97 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h @@ -53,6 +53,7 @@ #define PHB_IO_BASE (ISA_IO_END) #define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE) #define IOREMAP_BASE (PHB_IO_END) +#define IOREMAP_START (ioremap_bot) #define IOREMAP_END (KERN_VIRT_START + KERN_VIRT_SIZE) diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index 1ca1c1864b3234..7fed9dc0f147a6 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -293,5 +293,18 @@ static inline int pgd_huge(pgd_t pgd) #define is_hugepd(hpd) (hugepd_ok(hpd)) #endif +/* + * This gets called at the end of handling a page fault, when + * the kernel has put a new PTE into the page table for the process. + * We use it to ensure coherency between the i-cache and d-cache + * for the page which has just been mapped in. 
+ */ +#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_HUGETLB_PAGE) +void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep); +#else +static inline +void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {} +#endif + #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h index 383242eb0dea0f..378e3997845ab7 100644 --- a/arch/powerpc/include/asm/opal-api.h +++ b/arch/powerpc/include/asm/opal-api.h @@ -208,7 +208,10 @@ #define OPAL_HANDLE_HMI2 166 #define OPAL_NX_COPROC_INIT 167 #define OPAL_XIVE_GET_VP_STATE 170 -#define OPAL_LAST 170 +#define OPAL_MPIPL_UPDATE 173 +#define OPAL_MPIPL_REGISTER_TAG 174 +#define OPAL_MPIPL_QUERY_TAG 175 +#define OPAL_LAST 175 #define QUIESCE_HOLD 1 /* Spin all calls at entry */ #define QUIESCE_REJECT 2 /* Fail all calls with OPAL_BUSY */ @@ -453,6 +456,7 @@ enum opal_msg_type { OPAL_MSG_DPO = 5, OPAL_MSG_PRD = 6, OPAL_MSG_OCC = 7, + OPAL_MSG_PRD2 = 8, OPAL_MSG_TYPE_MAX, }; @@ -1059,6 +1063,7 @@ enum { OPAL_REBOOT_NORMAL = 0, OPAL_REBOOT_PLATFORM_ERROR = 1, OPAL_REBOOT_FULL_IPL = 2, + OPAL_REBOOT_MPIPL = 3, }; /* Argument to OPAL_PCI_TCE_KILL */ @@ -1135,6 +1140,44 @@ enum { #define OPAL_PCI_P2P_LOAD 0x2 #define OPAL_PCI_P2P_STORE 0x4 +/* MPIPL update operations */ +enum opal_mpipl_ops { + OPAL_MPIPL_ADD_RANGE = 0, + OPAL_MPIPL_REMOVE_RANGE = 1, + OPAL_MPIPL_REMOVE_ALL = 2, + OPAL_MPIPL_FREE_PRESERVED_MEMORY = 3, +}; + +/* Tag will point to various metadata area. Kernel will + * use tag to get metadata value. + */ +enum opal_mpipl_tags { + OPAL_MPIPL_TAG_CPU = 0, + OPAL_MPIPL_TAG_OPAL = 1, + OPAL_MPIPL_TAG_KERNEL = 2, + OPAL_MPIPL_TAG_BOOT_MEM = 3, +}; + +/* Preserved memory details */ +struct opal_mpipl_region { + __be64 src; + __be64 dest; + __be64 size; +}; + +/* Structure version */ +#define OPAL_MPIPL_VERSION 0x01 + +struct opal_mpipl_fadump { + u8 version; + u8 reserved[7]; + __be32 crashing_pir; /* OPAL crashing CPU PIR */ + __be32 cpu_data_version; + __be32 cpu_data_size; + __be32 region_cnt; + struct opal_mpipl_region region[]; +} __packed; + #endif /* __ASSEMBLY__ */ #endif /* __OPAL_API_H */ diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index 57bd029c715e11..a0cf8fba4d12bd 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -39,6 +39,7 @@ int64_t opal_npu_spa_clear_cache(uint64_t phb_id, uint32_t bdfn, uint64_t PE_handle); int64_t opal_npu_tl_set(uint64_t phb_id, uint32_t bdfn, long cap, uint64_t rate_phys, uint32_t size); + int64_t opal_console_write(int64_t term_number, __be64 *length, const uint8_t *buffer); int64_t opal_console_read(int64_t term_number, __be64 *length, @@ -272,7 +273,7 @@ int64_t opal_xive_get_vp_info(uint64_t vp, int64_t opal_xive_set_vp_info(uint64_t vp, uint64_t flags, uint64_t report_cl_pair); -int64_t opal_xive_allocate_irq(uint32_t chip_id); +int64_t opal_xive_allocate_irq_raw(uint32_t chip_id); int64_t opal_xive_free_irq(uint32_t girq); int64_t opal_xive_sync(uint32_t type, uint32_t id); int64_t opal_xive_dump(uint32_t type, uint32_t id); @@ -297,6 +298,10 @@ int opal_sensor_group_clear(u32 group_hndl, int token); int opal_sensor_group_enable(u32 group_hndl, int token, bool enable); int opal_nx_coproc_init(uint32_t chip_id, uint32_t ct); +s64 opal_mpipl_update(enum opal_mpipl_ops op, u64 src, u64 dest, u64 size); +s64 opal_mpipl_register_tag(enum opal_mpipl_tags tag, u64 addr); +s64 opal_mpipl_query_tag(enum opal_mpipl_tags 
tag, u64 *addr); + s64 opal_signal_system_reset(s32 cpu); s64 opal_quiesce(u64 shutdown_type, s32 cpu); diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 0d52f57fca04fc..c8bb14ff47135d 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -215,9 +215,19 @@ static inline bool pfn_valid(unsigned long pfn) /* * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit. + * This also results in better code generation. */ -#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET)) -#define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL) +#define __va(x) \ +({ \ + VIRTUAL_BUG_ON((unsigned long)(x) >= PAGE_OFFSET); \ + (void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET); \ +}) + +#define __pa(x) \ +({ \ + VIRTUAL_BUG_ON((unsigned long)(x) < PAGE_OFFSET); \ + (unsigned long)(x) & 0x0fffffffffffffffUL; \ +}) #else /* 32-bit, non book E */ #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START)) diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h index 683dfbc67ca8f5..d64dfe3ac71246 100644 --- a/arch/powerpc/include/asm/page_32.h +++ b/arch/powerpc/include/asm/page_32.h @@ -40,6 +40,8 @@ typedef unsigned long long pte_basic_t; typedef unsigned long pte_basic_t; #endif +#include + /* * Clear page using the dcbz instruction, which doesn't cause any * memory traffic (except to write out any cache lines which get @@ -49,6 +51,8 @@ static inline void clear_page(void *addr) { unsigned int i; + WARN_ON((unsigned long)addr & (L1_CACHE_BYTES - 1)); + for (i = 0; i < PAGE_SIZE / L1_CACHE_BYTES; i++, addr += L1_CACHE_BYTES) dcbz(addr); } diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 8dad1fdf4bd20c..ea6ec65970efc5 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -183,6 +183,7 @@ struct iommu_table; struct pci_dn { int flags; #define PCI_DN_FLAG_IOV_VF 0x01 +#define PCI_DN_FLAG_DEAD 0x02 /* Device has been hot-removed */ int busno; /* pci bus number */ int devfn; /* pci device and function number */ diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index c58ba796368864..8b7865a2d57663 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -68,6 +68,8 @@ extern pgd_t swapper_pg_dir[]; extern void paging_init(void); +extern unsigned long ioremap_bot; + /* * kern_addr_valid is intended to indicate whether an address is a valid * kernel address. Most 32-bit archs define it as always true (like this) @@ -77,18 +79,6 @@ extern void paging_init(void); #include - -/* - * This gets called at the end of handling a page fault, when - * the kernel has put a new PTE into the page table for the process. - * We use it to ensure coherency between the i-cache and d-cache - * for the page which has just been mapped in. - * On machines which use an MMU hash table, we use this to put a - * corresponding HPTE into the hash table ahead of time, instead of - * waiting for the inevitable extra hash-table miss exception. 
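The OPAL MPIPL interface added above is used register-then-query: before a crash the kernel tells firmware which memory ranges to preserve across a memory-preserving IPL and publishes tagged metadata addresses, then reads the tags back with opal_mpipl_query_tag() after the reboot. A minimal sketch of the registration side follows; the addresses, size and error handling are illustrative placeholders, not taken from this patch.

#include <linux/init.h>
#include <linux/errno.h>
#include <asm/opal.h>
#include <asm/opal-api.h>

/* Illustrative only: register one preserved range and one metadata tag. */
static int __init example_mpipl_registration(void)
{
	u64 src = 0x0;			/* region to preserve (e.g. boot memory) */
	u64 dest = 0x40000000;		/* where firmware copies it on crash */
	u64 size = 0x10000000;		/* 256MB, purely for illustration */
	u64 metadata_addr = 0x41000000;	/* placeholder metadata location */
	s64 rc;

	/* Ask firmware to preserve [src, src + size) into dest on an MPIPL */
	rc = opal_mpipl_update(OPAL_MPIPL_ADD_RANGE, src, dest, size);
	if (rc != OPAL_SUCCESS)
		return -EIO;

	/* Publish where the kernel's crash metadata lives */
	rc = opal_mpipl_register_tag(OPAL_MPIPL_TAG_KERNEL, metadata_addr);
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}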
- */ -extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *); - #ifndef CONFIG_TRANSPARENT_HUGEPAGE #define pmd_large(pmd) 0 #endif diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index cff5a411e59545..4497c8afb573b8 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h @@ -340,6 +340,12 @@ static inline long plpar_set_ciabr(unsigned long ciabr) { return 0; } + +static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex, + unsigned long *ptes) +{ + return 0; +} #endif /* CONFIG_PPC_PSERIES */ #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */ diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h index cec2d640951558..7f4be5a05eb3f4 100644 --- a/arch/powerpc/include/asm/ppc-pci.h +++ b/arch/powerpc/include/asm/ppc-pci.h @@ -62,11 +62,6 @@ void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode); void eeh_sysfs_add_device(struct pci_dev *pdev); void eeh_sysfs_remove_device(struct pci_dev *pdev); -static inline const char *eeh_pci_name(struct pci_dev *pdev) -{ - return pdev ? pci_name(pdev) : ""; -} - static inline const char *eeh_driver_name(struct pci_dev *pdev) { return (pdev && pdev->driver) ? pdev->driver->name : ""; @@ -74,6 +69,8 @@ static inline const char *eeh_driver_name(struct pci_dev *pdev) #endif /* CONFIG_EEH */ +#define PCI_BUSNO(bdfn) ((bdfn >> 8) & 0xff) + #else /* CONFIG_PCI */ static inline void init_pci_config_tokens(void) { } #endif /* !CONFIG_PCI */ diff --git a/arch/powerpc/include/asm/ppc4xx_ocm.h b/arch/powerpc/include/asm/ppc4xx_ocm.h deleted file mode 100644 index fc4db6dcde8418..00000000000000 --- a/arch/powerpc/include/asm/ppc4xx_ocm.h +++ /dev/null @@ -1,31 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * PowerPC 4xx OCM memory allocation support - * - * (C) Copyright 2009, Applied Micro Circuits Corporation - * Victor Gallardo (vgallardo@amcc.com) - * - * See file CREDITS for list of people who contributed to this - * project. 
- */ - -#ifndef __ASM_POWERPC_PPC4XX_OCM_H__ -#define __ASM_POWERPC_PPC4XX_OCM_H__ - -#define PPC4XX_OCM_NON_CACHED 0 -#define PPC4XX_OCM_CACHED 1 - -#if defined(CONFIG_PPC4xx_OCM) - -void *ppc4xx_ocm_alloc(phys_addr_t *phys, int size, int align, - int flags, const char *owner); -void ppc4xx_ocm_free(const void *virt); - -#else - -#define ppc4xx_ocm_alloc(phys, size, align, flags, owner) NULL -#define ppc4xx_ocm_free(addr) ((void)0) - -#endif /* CONFIG_PPC4xx_OCM */ - -#endif /* __ASM_POWERPC_PPC4XX_OCM_H__ */ diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index e0637730a8e7e5..6b03dff61a05e2 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -311,18 +311,48 @@ GLUE(.,name): addis reg,reg,(name - 0b)@ha; \ addi reg,reg,(name - 0b)@l; -#ifdef __powerpc64__ -#ifdef HAVE_AS_ATHIGH +#if defined(__powerpc64__) && defined(HAVE_AS_ATHIGH) #define __AS_ATHIGH high #else #define __AS_ATHIGH h #endif -#define LOAD_REG_IMMEDIATE(reg,expr) \ - lis reg,(expr)@highest; \ - ori reg,reg,(expr)@higher; \ - rldicr reg,reg,32,31; \ - oris reg,reg,(expr)@__AS_ATHIGH; \ - ori reg,reg,(expr)@l; + +.macro __LOAD_REG_IMMEDIATE_32 r, x + .if (\x) >= 0x8000 || (\x) < -0x8000 + lis \r, (\x)@__AS_ATHIGH + .if (\x) & 0xffff != 0 + ori \r, \r, (\x)@l + .endif + .else + li \r, (\x)@l + .endif +.endm + +.macro __LOAD_REG_IMMEDIATE r, x + .if (\x) >= 0x80000000 || (\x) < -0x80000000 + __LOAD_REG_IMMEDIATE_32 \r, (\x) >> 32 + sldi \r, \r, 32 + .if (\x) & 0xffff0000 != 0 + oris \r, \r, (\x)@__AS_ATHIGH + .endif + .if (\x) & 0xffff != 0 + ori \r, \r, (\x)@l + .endif + .else + __LOAD_REG_IMMEDIATE_32 \r, \x + .endif +.endm + +#ifdef __powerpc64__ + +#define LOAD_REG_IMMEDIATE(reg, expr) __LOAD_REG_IMMEDIATE reg, expr + +#define LOAD_REG_IMMEDIATE_SYM(reg, tmp, expr) \ + lis tmp, (expr)@highest; \ + lis reg, (expr)@__AS_ATHIGH; \ + ori tmp, tmp, (expr)@higher; \ + ori reg, reg, (expr)@l; \ + rldimi reg, tmp, 32, 0 #define LOAD_REG_ADDR(reg,name) \ ld reg,name@got(r2) @@ -335,11 +365,13 @@ GLUE(.,name): #else /* 32-bit */ -#define LOAD_REG_IMMEDIATE(reg,expr) \ +#define LOAD_REG_IMMEDIATE(reg, expr) __LOAD_REG_IMMEDIATE_32 reg, expr + +#define LOAD_REG_IMMEDIATE_SYM(reg,expr) \ lis reg,(expr)@ha; \ addi reg,reg,(expr)@l; -#define LOAD_REG_ADDR(reg,name) LOAD_REG_IMMEDIATE(reg, name) +#define LOAD_REG_ADDR(reg,name) LOAD_REG_IMMEDIATE_SYM(reg, name) #define LOAD_REG_ADDRBASE(reg, name) lis reg,name@ha #define ADDROFF(name) name@l @@ -351,19 +383,9 @@ GLUE(.,name): /* various errata or part fixups */ #ifdef CONFIG_PPC601_SYNC_FIX -#define SYNC \ -BEGIN_FTR_SECTION \ - sync; \ - isync; \ -END_FTR_SECTION_IFSET(CPU_FTR_601) -#define SYNC_601 \ -BEGIN_FTR_SECTION \ - sync; \ -END_FTR_SECTION_IFSET(CPU_FTR_601) -#define ISYNC_601 \ -BEGIN_FTR_SECTION \ - isync; \ -END_FTR_SECTION_IFSET(CPU_FTR_601) +#define SYNC sync; isync +#define SYNC_601 sync +#define ISYNC_601 isync #else #define SYNC #define SYNC_601 @@ -389,15 +411,11 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) #define MFTBU(dest) mfspr dest, SPRN_TBRU #endif -#ifndef CONFIG_SMP -#define TLBSYNC -#else /* CONFIG_SMP */ /* tlbsync is not implemented on 601 */ -#define TLBSYNC \ -BEGIN_FTR_SECTION \ - tlbsync; \ - sync; \ -END_FTR_SECTION_IFCLR(CPU_FTR_601) +#if !defined(CONFIG_SMP) || defined(CONFIG_PPC_BOOK3S_601) +#define TLBSYNC +#else +#define TLBSYNC tlbsync; sync #endif #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/include/asm/ptrace.h 
b/arch/powerpc/include/asm/ptrace.h index feee1b21bbd5f1..ee3ada66deb58c 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -203,7 +203,11 @@ do { \ #endif /* __powerpc64__ */ #define arch_has_single_step() (1) -#define arch_has_block_step() (!cpu_has_feature(CPU_FTR_601)) +#ifndef CONFIG_BOOK3S_601 +#define arch_has_block_step() (true) +#else +#define arch_has_block_step() (false) +#endif #define ARCH_HAS_USER_SINGLE_STEP_REPORT /* diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 10caa145f98b89..ec3714cf0989bf 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -38,6 +38,7 @@ #define MSR_TM_LG 32 /* Trans Mem Available */ #define MSR_VEC_LG 25 /* Enable AltiVec */ #define MSR_VSX_LG 23 /* Enable VSX */ +#define MSR_S_LG 22 /* Secure state */ #define MSR_POW_LG 18 /* Enable Power Management */ #define MSR_WE_LG 18 /* Wait State Enable */ #define MSR_TGPR_LG 17 /* TLB Update registers in use */ @@ -71,11 +72,13 @@ #define MSR_SF __MASK(MSR_SF_LG) /* Enable 64 bit mode */ #define MSR_ISF __MASK(MSR_ISF_LG) /* Interrupt 64b mode valid on 630 */ #define MSR_HV __MASK(MSR_HV_LG) /* Hypervisor state */ +#define MSR_S __MASK(MSR_S_LG) /* Secure state */ #else /* so tests for these bits fail on 32-bit */ #define MSR_SF 0 #define MSR_ISF 0 #define MSR_HV 0 +#define MSR_S 0 #endif /* diff --git a/arch/powerpc/include/asm/scom.h b/arch/powerpc/include/asm/scom.h deleted file mode 100644 index 08c44396e54a4c..00000000000000 --- a/arch/powerpc/include/asm/scom.h +++ /dev/null @@ -1,154 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright 2010 Benjamin Herrenschmidt, IBM Corp - * - * and David Gibson, IBM Corporation. - */ - -#ifndef _ASM_POWERPC_SCOM_H -#define _ASM_POWERPC_SCOM_H - -#ifdef __KERNEL__ -#ifndef __ASSEMBLY__ -#ifdef CONFIG_PPC_SCOM - -/* - * The SCOM bus is a sideband bus used for accessing various internal - * registers of the processor or the chipset. The implementation details - * differ between processors and platforms, and the access method as - * well. - * - * This API allows to "map" ranges of SCOM register numbers associated - * with a given SCOM controller. The later must be represented by a - * device node, though some implementations might support NULL if there - * is no possible ambiguity - * - * Then, scom_read/scom_write can be used to accesses registers inside - * that range. The argument passed is a register number relative to - * the beginning of the range mapped. 
- */ - -typedef void *scom_map_t; - -/* Value for an invalid SCOM map */ -#define SCOM_MAP_INVALID (NULL) - -/* The scom_controller data structure is what the platform passes - * to the core code in scom_init, it provides the actual implementation - * of all the SCOM functions - */ -struct scom_controller { - scom_map_t (*map)(struct device_node *ctrl_dev, u64 reg, u64 count); - void (*unmap)(scom_map_t map); - - int (*read)(scom_map_t map, u64 reg, u64 *value); - int (*write)(scom_map_t map, u64 reg, u64 value); -}; - -extern const struct scom_controller *scom_controller; - -/** - * scom_init - Initialize the SCOM backend, called by the platform - * @controller: The platform SCOM controller - */ -static inline void scom_init(const struct scom_controller *controller) -{ - scom_controller = controller; -} - -/** - * scom_map_ok - Test is a SCOM mapping is successful - * @map: The result of scom_map to test - */ -static inline int scom_map_ok(scom_map_t map) -{ - return map != SCOM_MAP_INVALID; -} - -/** - * scom_map - Map a block of SCOM registers - * @ctrl_dev: Device node of the SCOM controller - * some implementations allow NULL here - * @reg: first SCOM register to map - * @count: Number of SCOM registers to map - */ - -static inline scom_map_t scom_map(struct device_node *ctrl_dev, - u64 reg, u64 count) -{ - return scom_controller->map(ctrl_dev, reg, count); -} - -/** - * scom_find_parent - Find the SCOM controller for a device - * @dev: OF node of the device - * - * This is not meant for general usage, but in combination with - * scom_map() allows to map registers not represented by the - * device own scom-reg property. Useful for applying HW workarounds - * on things not properly represented in the device-tree for example. - */ -struct device_node *scom_find_parent(struct device_node *dev); - - -/** - * scom_map_device - Map a device's block of SCOM registers - * @dev: OF node of the device - * @index: Register bank index (index in "scom-reg" property) - * - * This function will use the device-tree binding for SCOM which - * is to follow "scom-parent" properties until it finds a node with - * a "scom-controller" property to find the controller. 
It will then - * use the "scom-reg" property which is made of reg/count pairs, - * each of them having a size defined by the controller's #scom-cells - * property - */ -extern scom_map_t scom_map_device(struct device_node *dev, int index); - - -/** - * scom_unmap - Unmap a block of SCOM registers - * @map: Result of scom_map is to be unmapped - */ -static inline void scom_unmap(scom_map_t map) -{ - if (scom_map_ok(map)) - scom_controller->unmap(map); -} - -/** - * scom_read - Read a SCOM register - * @map: Result of scom_map - * @reg: Register index within that map - * @value: Updated with the value read - * - * Returns 0 (success) or a negative error code - */ -static inline int scom_read(scom_map_t map, u64 reg, u64 *value) -{ - int rc; - - rc = scom_controller->read(map, reg, value); - if (rc) - *value = 0xfffffffffffffffful; - return rc; -} - -/** - * scom_write - Write to a SCOM register - * @map: Result of scom_map - * @reg: Register index within that map - * @value: Value to write - * - * Returns 0 (success) or a negative error code - */ -static inline int scom_write(scom_map_t map, u64 reg, u64 value) -{ - return scom_controller->write(map, reg, value); -} - - -#endif /* CONFIG_PPC_SCOM */ -#endif /* __ASSEMBLY__ */ -#endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_SCOM_H */ diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h index 4a1664a8658d7b..5a9b6eb651b6c0 100644 --- a/arch/powerpc/include/asm/sections.h +++ b/arch/powerpc/include/asm/sections.h @@ -61,17 +61,6 @@ static inline int overlaps_kernel_text(unsigned long start, unsigned long end) (unsigned long)_stext < end; } -static inline int overlaps_kvm_tmp(unsigned long start, unsigned long end) -{ -#ifdef CONFIG_KVM_GUEST - extern char kvm_tmp[]; - return start < (unsigned long)kvm_tmp && - (unsigned long)&kvm_tmp[1024 * 1024] < end; -#else - return 0; -#endif -} - #ifdef PPC64_ELF_ABI_v1 #define HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR 1 diff --git a/arch/powerpc/include/asm/setjmp.h b/arch/powerpc/include/asm/setjmp.h index d995061f5f8620..e9f81bb3f83b08 100644 --- a/arch/powerpc/include/asm/setjmp.h +++ b/arch/powerpc/include/asm/setjmp.h @@ -7,7 +7,7 @@ #define JMP_BUF_LEN 23 -extern long setjmp(long *); -extern void longjmp(long *, long); +extern long setjmp(long *) __attribute__((returns_twice)); +extern void longjmp(long *, long) __attribute__((noreturn)); #endif /* _ASM_POWERPC_SETJMP_H */ diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index a47f827bc5f17b..e9a960e28f3c84 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -101,15 +101,43 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) #if defined(CONFIG_PPC_SPLPAR) /* We only yield to the hypervisor if we are in shared processor mode */ -#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr)) -extern void __spin_yield(arch_spinlock_t *lock); -extern void __rw_yield(arch_rwlock_t *lock); +void splpar_spin_yield(arch_spinlock_t *lock); +void splpar_rw_yield(arch_rwlock_t *lock); #else /* SPLPAR */ -#define __spin_yield(x) barrier() -#define __rw_yield(x) barrier() -#define SHARED_PROCESSOR 0 +static inline void splpar_spin_yield(arch_spinlock_t *lock) {}; +static inline void splpar_rw_yield(arch_rwlock_t *lock) {}; #endif +static inline bool is_shared_processor(void) +{ +/* + * LPPACA is only available on Pseries so guard anything LPPACA related to + * allow other platforms (which include this common header) to compile. 
+ */ +#ifdef CONFIG_PPC_PSERIES + return (IS_ENABLED(CONFIG_PPC_SPLPAR) && + lppaca_shared_proc(local_paca->lppaca_ptr)); +#else + return false; +#endif +} + +static inline void spin_yield(arch_spinlock_t *lock) +{ + if (is_shared_processor()) + splpar_spin_yield(lock); + else + barrier(); +} + +static inline void rw_yield(arch_rwlock_t *lock) +{ + if (is_shared_processor()) + splpar_rw_yield(lock); + else + barrier(); +} + static inline void arch_spin_lock(arch_spinlock_t *lock) { while (1) { @@ -117,8 +145,8 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) break; do { HMT_low(); - if (SHARED_PROCESSOR) - __spin_yield(lock); + if (is_shared_processor()) + splpar_spin_yield(lock); } while (unlikely(lock->slock != 0)); HMT_medium(); } @@ -136,8 +164,8 @@ void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) local_irq_restore(flags); do { HMT_low(); - if (SHARED_PROCESSOR) - __spin_yield(lock); + if (is_shared_processor()) + splpar_spin_yield(lock); } while (unlikely(lock->slock != 0)); HMT_medium(); local_irq_restore(flags_dis); @@ -226,8 +254,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw) break; do { HMT_low(); - if (SHARED_PROCESSOR) - __rw_yield(rw); + if (is_shared_processor()) + splpar_rw_yield(rw); } while (unlikely(rw->lock < 0)); HMT_medium(); } @@ -240,8 +268,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw) break; do { HMT_low(); - if (SHARED_PROCESSOR) - __rw_yield(rw); + if (is_shared_processor()) + splpar_rw_yield(rw); } while (unlikely(rw->lock != 0)); HMT_medium(); } @@ -281,9 +309,9 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) rw->lock = 0; } -#define arch_spin_relax(lock) __spin_yield(lock) -#define arch_read_relax(lock) __rw_yield(lock) -#define arch_write_relax(lock) __rw_yield(lock) +#define arch_spin_relax(lock) spin_yield(lock) +#define arch_read_relax(lock) rw_yield(lock) +#define arch_write_relax(lock) rw_yield(lock) /* See include/linux/spinlock.h */ #define smp_mb__after_spinlock() smp_mb() diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h index 9bf6dffb40900a..b72692702f35fe 100644 --- a/arch/powerpc/include/asm/string.h +++ b/arch/powerpc/include/asm/string.h @@ -53,7 +53,9 @@ void *__memmove(void *to, const void *from, __kernel_size_t n); #ifndef CONFIG_KASAN #define __HAVE_ARCH_MEMSET32 #define __HAVE_ARCH_MEMSET64 +#define __HAVE_ARCH_MEMCPY_MCSAFE +extern int memcpy_mcsafe(void *dst, const void *src, __kernel_size_t sz); extern void *__memset16(uint16_t *, uint16_t v, __kernel_size_t); extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t); extern void *__memset64(uint64_t *, uint64_t v, __kernel_size_t); diff --git a/arch/powerpc/include/asm/svm.h b/arch/powerpc/include/asm/svm.h new file mode 100644 index 00000000000000..85580b30aba482 --- /dev/null +++ b/arch/powerpc/include/asm/svm.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * SVM helper functions + * + * Copyright 2018 Anshuman Khandual, IBM Corporation. + */ + +#ifndef _ASM_POWERPC_SVM_H +#define _ASM_POWERPC_SVM_H + +#ifdef CONFIG_PPC_SVM + +static inline bool is_secure_guest(void) +{ + return mfmsr() & MSR_S; +} + +void dtl_cache_ctor(void *addr); +#define get_dtl_cache_ctor() (is_secure_guest() ? 
dtl_cache_ctor : NULL) + +#else /* CONFIG_PPC_SVM */ + +static inline bool is_secure_guest(void) +{ + return false; +} + +#define get_dtl_cache_ctor() NULL + +#endif /* CONFIG_PPC_SVM */ +#endif /* _ASM_POWERPC_SVM_H */ diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h index 54f4ec1f9fab81..08dbe3e6831c53 100644 --- a/arch/powerpc/include/asm/time.h +++ b/arch/powerpc/include/asm/time.h @@ -41,11 +41,7 @@ struct div_result { /* Accessor functions for the timebase (RTC on 601) registers. */ /* If one day CONFIG_POWER is added just define __USE_RTC as 1 */ -#ifdef CONFIG_PPC_BOOK3S_32 -#define __USE_RTC() (cpu_has_feature(CPU_FTR_USE_RTC)) -#else -#define __USE_RTC() 0 -#endif +#define __USE_RTC() (IS_ENABLED(CONFIG_PPC_BOOK3S_601)) #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h index 926b9f91a3ef26..d2d2c4bd843589 100644 --- a/arch/powerpc/include/asm/timex.h +++ b/arch/powerpc/include/asm/timex.h @@ -17,38 +17,10 @@ typedef unsigned long cycles_t; static inline cycles_t get_cycles(void) { -#ifdef __powerpc64__ + if (IS_ENABLED(CONFIG_BOOK3S_601)) + return 0; + return mftb(); -#else - cycles_t ret; - - /* - * For the "cycle" counter we use the timebase lower half. - * Currently only used on SMP. - */ - - ret = 0; - - __asm__ __volatile__( -#ifdef CONFIG_PPC_8xx - "97: mftb %0\n" -#else - "97: mfspr %0, %2\n" -#endif - "99:\n" - ".section __ftr_fixup,\"a\"\n" - ".align 2\n" - "98:\n" - " .long %1\n" - " .long 0\n" - " .long 97b-98b\n" - " .long 99b-98b\n" - " .long 0\n" - " .long 0\n" - ".previous" - : "=r" (ret) : "i" (CPU_FTR_601), "i" (SPRN_TBRL)); - return ret; -#endif } #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 8b03eb44e876ee..15002b51ff18df 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -387,6 +387,20 @@ static inline unsigned long raw_copy_to_user(void __user *to, return ret; } +static __always_inline unsigned long __must_check +copy_to_user_mcsafe(void __user *to, const void *from, unsigned long n) +{ + if (likely(check_copy_size(from, n, true))) { + if (access_ok(to, n)) { + allow_write_to_user(to, n); + n = memcpy_mcsafe((void *)to, from, n); + prevent_write_to_user(to, n); + } + } + + return n; +} + extern unsigned long __clear_user(void __user *addr, unsigned long size); static inline unsigned long clear_user(void __user *addr, unsigned long size) diff --git a/arch/powerpc/include/asm/ultravisor-api.h b/arch/powerpc/include/asm/ultravisor-api.h new file mode 100644 index 00000000000000..4fcda1d5793da9 --- /dev/null +++ b/arch/powerpc/include/asm/ultravisor-api.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Ultravisor API. + * + * Copyright 2019, IBM Corporation. 
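is_secure_guest() is the gate for SVM-specific behaviour; together with the uv_share_page()/uv_unshare_page() ucall wrappers added below in ultravisor.h, it lets a secure guest expose selected pages to the hypervisor, which otherwise cannot read guest memory. A rough sketch of that pattern is below; the helper name and buffer are illustrative and not the actual DTL cache code.

#include <linux/gfp.h>
#include <linux/pfn.h>
#include <asm/page.h>
#include <asm/svm.h>
#include <asm/ultravisor.h>

/* Illustrative only: allocate a page the hypervisor must be able to read. */
static void *example_alloc_hv_visible_page(void)
{
	void *addr = (void *)get_zeroed_page(GFP_KERNEL);

	if (!addr)
		return NULL;

	/* In a secure guest, pages are invisible to the HV until shared. */
	if (is_secure_guest())
		uv_share_page(PHYS_PFN(__pa(addr)), 1);

	return addr;
}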
+ * + */ +#ifndef _ASM_POWERPC_ULTRAVISOR_API_H +#define _ASM_POWERPC_ULTRAVISOR_API_H + +#include + +/* Return codes */ +#define U_BUSY H_BUSY +#define U_FUNCTION H_FUNCTION +#define U_NOT_AVAILABLE H_NOT_AVAILABLE +#define U_P2 H_P2 +#define U_P3 H_P3 +#define U_P4 H_P4 +#define U_P5 H_P5 +#define U_PARAMETER H_PARAMETER +#define U_PERMISSION H_PERMISSION +#define U_SUCCESS H_SUCCESS + +/* opcodes */ +#define UV_WRITE_PATE 0xF104 +#define UV_RETURN 0xF11C +#define UV_ESM 0xF110 +#define UV_SHARE_PAGE 0xF130 +#define UV_UNSHARE_PAGE 0xF134 +#define UV_UNSHARE_ALL_PAGES 0xF140 + +#endif /* _ASM_POWERPC_ULTRAVISOR_API_H */ diff --git a/arch/powerpc/include/asm/ultravisor.h b/arch/powerpc/include/asm/ultravisor.h new file mode 100644 index 00000000000000..b1bc2e043ed483 --- /dev/null +++ b/arch/powerpc/include/asm/ultravisor.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Ultravisor definitions + * + * Copyright 2019, IBM Corporation. + * + */ +#ifndef _ASM_POWERPC_ULTRAVISOR_H +#define _ASM_POWERPC_ULTRAVISOR_H + +#include +#include +#include + +int early_init_dt_scan_ultravisor(unsigned long node, const char *uname, + int depth, void *data); + +/* + * In ultravisor enabled systems, PTCR becomes ultravisor privileged only for + * writing and an attempt to write to it will cause a Hypervisor Emulation + * Assistance interrupt. + */ +static inline void set_ptcr_when_no_uv(u64 val) +{ + if (!firmware_has_feature(FW_FEATURE_ULTRAVISOR)) + mtspr(SPRN_PTCR, val); +} + +static inline int uv_register_pate(u64 lpid, u64 dw0, u64 dw1) +{ + return ucall_norets(UV_WRITE_PATE, lpid, dw0, dw1); +} + +static inline int uv_share_page(u64 pfn, u64 npages) +{ + return ucall_norets(UV_SHARE_PAGE, pfn, npages); +} + +static inline int uv_unshare_page(u64 pfn, u64 npages) +{ + return ucall_norets(UV_UNSHARE_PAGE, pfn, npages); +} + +static inline int uv_unshare_all_pages(void) +{ + return ucall_norets(UV_UNSHARE_ALL_PAGES); +} + +#endif /* _ASM_POWERPC_ULTRAVISOR_H */ diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h index 818989e1167892..24cdf97376c455 100644 --- a/arch/powerpc/include/asm/xive.h +++ b/arch/powerpc/include/asm/xive.h @@ -99,6 +99,7 @@ extern void xive_flush_interrupt(void); /* xmon hook */ extern void xmon_xive_do_dump(int cpu); +extern int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d); /* APIs used by KVM */ extern u32 xive_native_default_eq_shift(void); diff --git a/arch/powerpc/kernel/.gitignore b/arch/powerpc/kernel/.gitignore index c5f676c3c224b6..67ebd3003c0504 100644 --- a/arch/powerpc/kernel/.gitignore +++ b/arch/powerpc/kernel/.gitignore @@ -1 +1,2 @@ +prom_init_check vmlinux.lds diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 56dfa7a2a6f2a0..a7ca8fe623686a 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -52,7 +52,7 @@ obj-y := cputable.o ptrace.o syscalls.o \ of_platform.o prom_parse.o obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \ signal_64.o ptrace32.o \ - paca.o nvram_64.o firmware.o + paca.o nvram_64.o firmware.o note.o obj-$(CONFIG_VDSO32) += vdso32/ obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o @@ -78,7 +78,9 @@ obj-$(CONFIG_EEH) += eeh.o eeh_pe.o eeh_dev.o eeh_cache.o \ eeh_driver.o eeh_event.o eeh_sysfs.o obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o -obj-$(CONFIG_FA_DUMP) += fadump.o +ifneq ($(CONFIG_FA_DUMP)$(CONFIG_PRESERVE_FA_DUMP),) +obj-y += fadump.o +endif ifdef 
CONFIG_PPC32 obj-$(CONFIG_E500) += idle_e500.o endif @@ -155,6 +157,9 @@ endif obj-$(CONFIG_EPAPR_PARAVIRT) += epapr_paravirt.o epapr_hcalls.o obj-$(CONFIG_KVM_GUEST) += kvm.o kvm_emul.o +ifneq ($(CONFIG_PPC_POWERNV)$(CONFIG_PPC_SVM),) +obj-y += ucall.o +endif # Disable GCOV, KCOV & sanitizers in odd or sensitive code GCOV_PROFILE_prom_init.o := n @@ -184,15 +189,13 @@ extra-$(CONFIG_ALTIVEC) += vector.o extra-$(CONFIG_PPC64) += entry_64.o extra-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += prom_init.o -ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE -$(obj)/built-in.a: prom_init_check +extra-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += prom_init_check -quiet_cmd_prom_init_check = CALL $< - cmd_prom_init_check = $(CONFIG_SHELL) $< "$(NM)" "$(obj)/prom_init.o" +quiet_cmd_prom_init_check = PROMCHK $@ + cmd_prom_init_check = $(CONFIG_SHELL) $< "$(NM)" $(obj)/prom_init.o; touch $@ -PHONY += prom_init_check -prom_init_check: $(src)/prom_init_check.sh $(obj)/prom_init.o - $(call cmd,prom_init_check) -endif +$(obj)/prom_init_check: $(src)/prom_init_check.sh $(obj)/prom_init.o FORCE + $(call if_changed,prom_init_check) +targets += prom_init_check clean-files := vmlinux.lds diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 4ccb6b3a7fbd6b..484f54dab24753 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -506,6 +506,7 @@ int main(void) OFFSET(KVM_VRMA_SLB_V, kvm, arch.vrma_slb_v); OFFSET(KVM_RADIX, kvm, arch.radix); OFFSET(KVM_FWNMI, kvm, arch.fwnmi_enabled); + OFFSET(KVM_SECURE_GUEST, kvm, arch.secure_guest); OFFSET(VCPU_DSISR, kvm_vcpu, arch.shregs.dsisr); OFFSET(VCPU_DAR, kvm_vcpu, arch.shregs.dar); OFFSET(VCPU_VPA, kvm_vcpu, arch.vpa.pinned_addr); diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index bfe5f4a2886beb..e745abc5457a0b 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -569,7 +569,7 @@ static struct cpu_spec __initdata cpu_specs[] = { #endif /* CONFIG_PPC_BOOK3S_64 */ #ifdef CONFIG_PPC32 -#ifdef CONFIG_PPC_BOOK3S_32 +#ifdef CONFIG_PPC_BOOK3S_601 { /* 601 */ .pvr_mask = 0xffff0000, .pvr_value = 0x00010000, @@ -583,6 +583,8 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check = machine_check_generic, .platform = "ppc601", }, +#endif /* CONFIG_PPC_BOOK3S_601 */ +#ifdef CONFIG_PPC_BOOK3S_6xx { /* 603 */ .pvr_mask = 0xffff0000, .pvr_value = 0x00030000, @@ -1212,7 +1214,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check = machine_check_generic, .platform = "ppc603", }, -#endif /* CONFIG_PPC_BOOK3S_32 */ +#endif /* CONFIG_PPC_BOOK3S_6xx */ #ifdef CONFIG_PPC_8xx { /* 8xx */ .pvr_mask = 0xffff0000, diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index 2f5a53874f6d43..e486d1d78de288 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c @@ -122,18 +122,17 @@ int dma_iommu_dma_supported(struct device *dev, u64 mask) { struct iommu_table *tbl = get_iommu_table_base(dev); - if (!tbl) { - dev_info(dev, "Warning: IOMMU dma not supported: mask 0x%08llx" - ", table unavailable\n", mask); - return 0; - } - if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) { dev->archdata.iommu_bypass = true; dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n"); return 1; } + if (!tbl) { + dev_err(dev, "Warning: IOMMU dma not supported: mask 0x%08llx, table unavailable\n", mask); + return 0; + } + if (tbl->it_offset > (mask >> tbl->it_page_shift)) { dev_info(dev, "Warning: IOMMU offset too big for device 
mask\n"); dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n", diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index c0e4b73191f3ab..0a91dee51245fc 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -150,6 +150,16 @@ static int __init eeh_setup(char *str) } __setup("eeh=", eeh_setup); +void eeh_show_enabled(void) +{ + if (eeh_has_flag(EEH_FORCE_DISABLED)) + pr_info("EEH: Recovery disabled by kernel parameter.\n"); + else if (eeh_has_flag(EEH_ENABLED)) + pr_info("EEH: Capable adapter found: recovery enabled.\n"); + else + pr_info("EEH: No capable adapters found: recovery disabled.\n"); +} + /* * This routine captures assorted PCI configuration space data * for the indicated PCI device, and puts them into a buffer @@ -410,11 +420,9 @@ static int eeh_phb_check_failure(struct eeh_pe *pe) eeh_pe_mark_isolated(phb_pe); eeh_serialize_unlock(flags); - pr_err("EEH: PHB#%x failure detected, location: %s\n", + pr_debug("EEH: PHB#%x failure detected, location: %s\n", phb_pe->phb->global_number, eeh_pe_loc_get(phb_pe)); - dump_stack(); eeh_send_failure_event(phb_pe); - return 1; out: eeh_serialize_unlock(flags); @@ -441,7 +449,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev) unsigned long flags; struct device_node *dn; struct pci_dev *dev; - struct eeh_pe *pe, *parent_pe, *phb_pe; + struct eeh_pe *pe, *parent_pe; int rc = 0; const char *location = NULL; @@ -460,8 +468,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev) /* Access to IO BARs might get this far and still not want checking. */ if (!pe) { eeh_stats.ignored_check++; - pr_debug("EEH: Ignored check for %s\n", - eeh_pci_name(dev)); + eeh_edev_dbg(edev, "Ignored check\n"); return 0; } @@ -501,12 +508,11 @@ int eeh_dev_check_failure(struct eeh_dev *edev) if (dn) location = of_get_property(dn, "ibm,loc-code", NULL); - printk(KERN_ERR "EEH: %d reads ignored for recovering device at " - "location=%s driver=%s pci addr=%s\n", + eeh_edev_err(edev, "%d reads ignored for recovering device at location=%s driver=%s\n", pe->check_count, location ? location : "unknown", - eeh_driver_name(dev), eeh_pci_name(dev)); - printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n", + eeh_driver_name(dev)); + eeh_edev_err(edev, "Might be infinite loop in %s driver\n", eeh_driver_name(dev)); dump_stack(); } @@ -573,13 +579,8 @@ int eeh_dev_check_failure(struct eeh_dev *edev) * a stack trace will help the device-driver authors figure * out what happened. So print that out. 
*/ - phb_pe = eeh_phb_pe_get(pe->phb); - pr_err("EEH: Frozen PHB#%x-PE#%x detected\n", - pe->phb->global_number, pe->addr); - pr_err("EEH: PE location: %s, PHB location: %s\n", - eeh_pe_loc_get(pe), eeh_pe_loc_get(phb_pe)); - dump_stack(); - + pr_debug("EEH: %s: Frozen PHB#%x-PE#%x detected\n", + __func__, pe->phb->global_number, pe->addr); eeh_send_failure_event(pe); return 1; @@ -697,7 +698,7 @@ int eeh_pci_enable(struct eeh_pe *pe, int function) return rc; } -static void *eeh_disable_and_save_dev_state(struct eeh_dev *edev, +static void eeh_disable_and_save_dev_state(struct eeh_dev *edev, void *userdata) { struct pci_dev *pdev = eeh_dev_to_pci_dev(edev); @@ -708,7 +709,7 @@ static void *eeh_disable_and_save_dev_state(struct eeh_dev *edev, * state for the specified device */ if (!pdev || pdev == dev) - return NULL; + return; /* Ensure we have D0 power state */ pci_set_power_state(pdev, PCI_D0); @@ -721,18 +722,16 @@ static void *eeh_disable_and_save_dev_state(struct eeh_dev *edev, * interrupt from the device */ pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); - - return NULL; } -static void *eeh_restore_dev_state(struct eeh_dev *edev, void *userdata) +static void eeh_restore_dev_state(struct eeh_dev *edev, void *userdata) { struct pci_dn *pdn = eeh_dev_to_pdn(edev); struct pci_dev *pdev = eeh_dev_to_pci_dev(edev); struct pci_dev *dev = userdata; if (!pdev) - return NULL; + return; /* Apply customization from firmware */ if (pdn && eeh_ops->restore_config) @@ -741,8 +740,6 @@ static void *eeh_restore_dev_state(struct eeh_dev *edev, void *userdata) /* The caller should restore state for the specified device */ if (pdev != dev) pci_restore_state(pdev); - - return NULL; } int eeh_restore_vf_config(struct pci_dn *pdn) @@ -868,7 +865,7 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat * the indicated device and its children so that the bunch of the * devices could be reset properly. 
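eeh_dev_check_failure() is normally reached from a driver's MMIO path: when a read comes back as all-ones, the driver asks EEH whether the PE is actually frozen rather than trusting the value. A hedged sketch of that calling pattern is below; the register helper and names are illustrative.

#include <linux/io.h>
#include <linux/pci.h>
#include <asm/eeh.h>

/* Illustrative only: distrust an all-ones MMIO read and let EEH check the PE. */
static u32 example_read_reg(struct pci_dev *pdev, void __iomem *base, int off)
{
	u32 val = readl(base + off);

	if (val == 0xffffffff &&
	    eeh_dev_check_failure(pci_dev_to_eeh_dev(pdev)))
		return 0;	/* PE is frozen; recovery has been queued */

	return val;
}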
*/ -static void *eeh_set_dev_freset(struct eeh_dev *edev, void *flag) +static void eeh_set_dev_freset(struct eeh_dev *edev, void *flag) { struct pci_dev *dev; unsigned int *freset = (unsigned int *)flag; @@ -876,8 +873,6 @@ static void *eeh_set_dev_freset(struct eeh_dev *edev, void *flag) dev = eeh_dev_to_pci_dev(edev); if (dev) *freset |= dev->needs_freset; - - return NULL; } static void eeh_pe_refreeze_passed(struct eeh_pe *root) @@ -1063,23 +1058,6 @@ static struct notifier_block eeh_reboot_nb = { .notifier_call = eeh_reboot_notifier, }; -void eeh_probe_devices(void) -{ - struct pci_controller *hose, *tmp; - struct pci_dn *pdn; - - /* Enable EEH for all adapters */ - list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { - pdn = hose->pci_data; - traverse_pci_dn(pdn, eeh_ops->probe, NULL); - } - if (eeh_enabled()) - pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n"); - else - pr_info("EEH: No capable adapters found\n"); - -} - /** * eeh_init - EEH initialization * @@ -1120,6 +1098,8 @@ static int eeh_init(void) list_for_each_entry_safe(hose, tmp, &hose_list, list_node) eeh_dev_phb_init_dynamic(hose); + eeh_addr_cache_init(); + /* Initialize EEH event */ return eeh_event_init(); } @@ -1190,15 +1170,14 @@ void eeh_add_device_late(struct pci_dev *dev) struct pci_dn *pdn; struct eeh_dev *edev; - if (!dev || !eeh_enabled()) + if (!dev) return; - pr_debug("EEH: Adding device %s\n", pci_name(dev)); - pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn); edev = pdn_to_eeh_dev(pdn); + eeh_edev_dbg(edev, "Adding device\n"); if (edev->pdev == dev) { - pr_debug("EEH: Already referenced !\n"); + eeh_edev_dbg(edev, "Device already referenced!\n"); return; } @@ -1246,6 +1225,8 @@ void eeh_add_device_tree_late(struct pci_bus *bus) { struct pci_dev *dev; + if (eeh_has_flag(EEH_FORCE_DISABLED)) + return; list_for_each_entry(dev, &bus->devices, bus_list) { eeh_add_device_late(dev); if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { @@ -1299,10 +1280,10 @@ void eeh_remove_device(struct pci_dev *dev) edev = pci_dev_to_eeh_dev(dev); /* Unregister the device with the EEH/PCI address search system */ - pr_debug("EEH: Removing device %s\n", pci_name(dev)); + dev_dbg(&dev->dev, "EEH: Removing device\n"); if (!edev || !edev->pdev || !edev->pe) { - pr_debug("EEH: Not referenced !\n"); + dev_dbg(&dev->dev, "EEH: Device not referenced!\n"); return; } @@ -1890,6 +1871,198 @@ static const struct file_operations eeh_force_recover_fops = { .llseek = no_llseek, .write = eeh_force_recover_write, }; + +static ssize_t eeh_debugfs_dev_usage(struct file *filp, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + static const char usage[] = "input format: <domain>:<bus>:<dev>.<fn>\n"; + + return simple_read_from_buffer(user_buf, count, ppos, + usage, sizeof(usage) - 1); } + +static ssize_t eeh_dev_check_write(struct file *filp, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + uint32_t domain, bus, dev, fn; + struct pci_dev *pdev; + struct eeh_dev *edev; + char buf[20]; + int ret; + + memset(buf, 0, sizeof(buf)); + ret = simple_write_to_buffer(buf, sizeof(buf)-1, ppos, user_buf, count); + if (!ret) + return -EFAULT; + + ret = sscanf(buf, "%x:%x:%x.%x", &domain, &bus, &dev, &fn); + if (ret != 4) { + pr_err("%s: expected 4 args, got %d\n", __func__, ret); + return -EINVAL; + } + + pdev = pci_get_domain_bus_and_slot(domain, bus, (dev << 3) | fn); + if (!pdev) + return -ENODEV; + + edev = pci_dev_to_eeh_dev(pdev); + if (!edev) { + pci_err(pdev, "No eeh_dev for this device!\n"); + pci_dev_put(pdev); + return -ENODEV; + } + +
ret = eeh_dev_check_failure(edev); + pci_info(pdev, "eeh_dev_check_failure(%04x:%02x:%02x.%01x) = %d\n", + domain, bus, dev, fn, ret); + + pci_dev_put(pdev); + + return count; +} + +static const struct file_operations eeh_dev_check_fops = { + .open = simple_open, + .llseek = no_llseek, + .write = eeh_dev_check_write, + .read = eeh_debugfs_dev_usage, +}; + +static int eeh_debugfs_break_device(struct pci_dev *pdev) +{ + struct resource *bar = NULL; + void __iomem *mapped; + u16 old, bit; + int i, pos; + + /* Do we have an MMIO BAR to disable? */ + for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { + struct resource *r = &pdev->resource[i]; + + if (!r->flags || !r->start) + continue; + if (r->flags & IORESOURCE_IO) + continue; + if (r->flags & IORESOURCE_UNSET) + continue; + + bar = r; + break; + } + + if (!bar) { + pci_err(pdev, "Unable to find Memory BAR to cause EEH with\n"); + return -ENXIO; + } + + pci_err(pdev, "Going to break: %pR\n", bar); + + if (pdev->is_virtfn) { +#ifndef CONFIG_PCI_IOV + return -ENXIO; +#else + /* + * VFs don't have a per-function COMMAND register, so the best + * we can do is clear the Memory Space Enable bit in the PF's + * SRIOV control reg. + * + * Unfortunately, this requires that we have a PF (i.e. it doesn't + * work for a passed-through VF) and it has the potential side + * effect of also causing an EEH on every other VF under the + * PF. Oh well. + */ + pdev = pdev->physfn; + if (!pdev) + return -ENXIO; /* passed through VFs have no PF */ + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + pos += PCI_SRIOV_CTRL; + bit = PCI_SRIOV_CTRL_MSE; +#endif /* !CONFIG_PCI_IOV */ + } else { + bit = PCI_COMMAND_MEMORY; + pos = PCI_COMMAND; + } + + /* + * Process here is: + * + * 1. Disable Memory space. + * + * 2. Perform an MMIO to the device. This should result in an error + * (CA / UR) being raised by the device which results in an EEH + * PE freeze. Using the in_8() accessor skips the EEH detection hook, + * so the EEH detection machinery won't be + * triggered here. This is to match the usual behaviour of EEH + * where the HW will asynchronously freeze a PE and it's up to + * the kernel to notice and deal with it. + * + * 3. Turn Memory space back on. This is more important for VFs + * since recovery will probably fail if we don't. For normal devices + * the COMMAND register is reset as a part of re-initialising + * the device.
+ * + * Breaking stuff is the point so who cares if it's racy ;) + */ + pci_read_config_word(pdev, pos, &old); + + mapped = ioremap(bar->start, PAGE_SIZE); + if (!mapped) { + pci_err(pdev, "Unable to map MMIO BAR %pR\n", bar); + return -ENXIO; + } + + pci_write_config_word(pdev, pos, old & ~bit); + in_8(mapped); + pci_write_config_word(pdev, pos, old); + + iounmap(mapped); + + return 0; +} + +static ssize_t eeh_dev_break_write(struct file *filp, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + uint32_t domain, bus, dev, fn; + struct pci_dev *pdev; + char buf[20]; + int ret; + + memset(buf, 0, sizeof(buf)); + ret = simple_write_to_buffer(buf, sizeof(buf)-1, ppos, user_buf, count); + if (!ret) + return -EFAULT; + + ret = sscanf(buf, "%x:%x:%x.%x", &domain, &bus, &dev, &fn); + if (ret != 4) { + pr_err("%s: expected 4 args, got %d\n", __func__, ret); + return -EINVAL; + } + + pdev = pci_get_domain_bus_and_slot(domain, bus, (dev << 3) | fn); + if (!pdev) + return -ENODEV; + + ret = eeh_debugfs_break_device(pdev); + pci_dev_put(pdev); + + if (ret < 0) + return ret; + + return count; +} + +static const struct file_operations eeh_dev_break_fops = { + .open = simple_open, + .llseek = no_llseek, + .write = eeh_dev_break_write, + .read = eeh_debugfs_dev_usage, +}; + #endif static int __init eeh_init_proc(void) @@ -1905,6 +2078,12 @@ static int __init eeh_init_proc(void) debugfs_create_bool("eeh_disable_recovery", 0600, powerpc_debugfs_root, &eeh_debugfs_no_recover); + debugfs_create_file_unsafe("eeh_dev_check", 0600, + powerpc_debugfs_root, NULL, + &eeh_dev_check_fops); + debugfs_create_file_unsafe("eeh_dev_break", 0600, + powerpc_debugfs_root, NULL, + &eeh_dev_break_fops); debugfs_create_file_unsafe("eeh_force_recover", 0600, powerpc_debugfs_root, NULL, &eeh_force_recover_fops); diff --git a/arch/powerpc/kernel/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c index 05ffd32b3416c6..cf11277ebd0209 100644 --- a/arch/powerpc/kernel/eeh_cache.c +++ b/arch/powerpc/kernel/eeh_cache.c @@ -148,8 +148,8 @@ eeh_addr_cache_insert(struct pci_dev *dev, resource_size_t alo, piar->pcidev = dev; piar->flags = flags; - pr_debug("PIAR: insert range=[%pap:%pap] dev=%s\n", - &alo, &ahi, pci_name(dev)); + eeh_edev_dbg(piar->edev, "PIAR: insert range=[%pap:%pap]\n", + &alo, &ahi); rb_link_node(&piar->rb_node, parent, p); rb_insert_color(&piar->rb_node, &pci_io_addr_cache_root.rb_root); @@ -229,8 +229,8 @@ static inline void __eeh_addr_cache_rmv_dev(struct pci_dev *dev) piar = rb_entry(n, struct pci_io_addr_range, rb_node); if (piar->pcidev == dev) { - pr_debug("PIAR: remove range=[%pap:%pap] dev=%s\n", - &piar->addr_lo, &piar->addr_hi, pci_name(dev)); + eeh_edev_dbg(piar->edev, "PIAR: remove range=[%pap:%pap]\n", + &piar->addr_lo, &piar->addr_hi); rb_erase(n, &pci_io_addr_cache_root.rb_root); kfree(piar); goto restart; @@ -258,37 +258,14 @@ void eeh_addr_cache_rmv_dev(struct pci_dev *dev) } /** - * eeh_addr_cache_build - Build a cache of I/O addresses + * eeh_addr_cache_init - Initialize a cache of I/O addresses * - * Build a cache of pci i/o addresses. This cache will be used to + * Initialize a cache of pci i/o addresses. This cache will be used to * find the pci device that corresponds to a given address. - * This routine scans all pci busses to build the cache. - * Must be run late in boot process, after the pci controllers - * have been scanned for devices (after all device resources are known). 
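Both debugfs entries registered above take a PCI address in the same hex domain:bus:dev.fn form that the write handlers parse with sscanf(). Assuming debugfs is mounted at /sys/kernel/debug and powerpc_debugfs_root corresponds to the powerpc directory there, a userspace trigger for the error-injection file can be as small as the sketch below; the path and victim address are assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Path and victim address are illustrative assumptions. */
	const char *path = "/sys/kernel/debug/powerpc/eeh_dev_break";
	const char *bdfn = "0000:01:00.0";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, bdfn, strlen(bdfn)) < 0)
		perror("write");
	close(fd);
	return 0;
}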
*/ -void eeh_addr_cache_build(void) +void eeh_addr_cache_init(void) { - struct pci_dn *pdn; - struct eeh_dev *edev; - struct pci_dev *dev = NULL; - spin_lock_init(&pci_io_addr_cache_root.piar_lock); - - for_each_pci_dev(dev) { - pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn); - if (!pdn) - continue; - - edev = pdn_to_eeh_dev(pdn); - if (!edev) - continue; - - dev->dev.archdata.edev = edev; - edev->pdev = dev; - - eeh_addr_cache_insert_dev(dev); - eeh_sysfs_add_device(dev); - } } static int eeh_addr_cache_show(struct seq_file *s, void *v) diff --git a/arch/powerpc/kernel/eeh_dev.c b/arch/powerpc/kernel/eeh_dev.c index c4317c452d98e7..7370185c7a0517 100644 --- a/arch/powerpc/kernel/eeh_dev.c +++ b/arch/powerpc/kernel/eeh_dev.c @@ -47,6 +47,8 @@ struct eeh_dev *eeh_dev_init(struct pci_dn *pdn) /* Associate EEH device with OF node */ pdn->edev = edev; edev->pdn = pdn; + edev->bdfn = (pdn->busno << 8) | pdn->devfn; + edev->controller = pdn->phb; return edev; } diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 89623962c72752..d9279d0ee9f54f 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -81,23 +82,6 @@ static const char *pci_ers_result_name(enum pci_ers_result result) } }; -static __printf(2, 3) void eeh_edev_info(const struct eeh_dev *edev, - const char *fmt, ...) -{ - struct va_format vaf; - va_list args; - - va_start(args, fmt); - - vaf.fmt = fmt; - vaf.va = &args; - - printk(KERN_INFO "EEH: PE#%x (PCI %s): %pV\n", edev->pe_config_addr, - edev->pdev ? dev_name(&edev->pdev->dev) : "none", &vaf); - - va_end(args); -} - static enum pci_ers_result pci_ers_merge_result(enum pci_ers_result old, enum pci_ers_result new) { @@ -113,8 +97,16 @@ static bool eeh_dev_removed(struct eeh_dev *edev) static bool eeh_edev_actionable(struct eeh_dev *edev) { - return (edev->pdev && !eeh_dev_removed(edev) && - !eeh_pe_passed(edev->pe)); + if (!edev->pdev) + return false; + if (edev->pdev->error_state == pci_channel_io_perm_failure) + return false; + if (eeh_dev_removed(edev)) + return false; + if (eeh_pe_passed(edev->pe)) + return false; + + return true; } /** @@ -214,12 +206,12 @@ static void eeh_enable_irq(struct eeh_dev *edev) } } -static void *eeh_dev_save_state(struct eeh_dev *edev, void *userdata) +static void eeh_dev_save_state(struct eeh_dev *edev, void *userdata) { struct pci_dev *pdev; if (!edev) - return NULL; + return; /* * We cannot access the config space on some adapters. @@ -229,14 +221,13 @@ static void *eeh_dev_save_state(struct eeh_dev *edev, void *userdata) * device is created. 
*/ if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) - return NULL; + return; pdev = eeh_dev_to_pci_dev(edev); if (!pdev) - return NULL; + return; pci_save_state(pdev); - return NULL; } static void eeh_set_channel_state(struct eeh_pe *root, enum pci_channel_state s) @@ -274,20 +265,27 @@ static void eeh_set_irq_state(struct eeh_pe *root, bool enable) } typedef enum pci_ers_result (*eeh_report_fn)(struct eeh_dev *, + struct pci_dev *, struct pci_driver *); static void eeh_pe_report_edev(struct eeh_dev *edev, eeh_report_fn fn, enum pci_ers_result *result) { + struct pci_dev *pdev; struct pci_driver *driver; enum pci_ers_result new_result; - if (!edev->pdev) { + pci_lock_rescan_remove(); + pdev = edev->pdev; + if (pdev) + get_device(&pdev->dev); + pci_unlock_rescan_remove(); + if (!pdev) { eeh_edev_info(edev, "no device"); return; } - device_lock(&edev->pdev->dev); + device_lock(&pdev->dev); if (eeh_edev_actionable(edev)) { - driver = eeh_pcid_get(edev->pdev); + driver = eeh_pcid_get(pdev); if (!driver) eeh_edev_info(edev, "no driver"); @@ -296,7 +294,7 @@ static void eeh_pe_report_edev(struct eeh_dev *edev, eeh_report_fn fn, else if (edev->mode & EEH_DEV_NO_HANDLER) eeh_edev_info(edev, "driver bound too late"); else { - new_result = fn(edev, driver); + new_result = fn(edev, pdev, driver); eeh_edev_info(edev, "%s driver reports: '%s'", driver->name, pci_ers_result_name(new_result)); @@ -305,12 +303,15 @@ static void eeh_pe_report_edev(struct eeh_dev *edev, eeh_report_fn fn, new_result); } if (driver) - eeh_pcid_put(edev->pdev); + eeh_pcid_put(pdev); } else { - eeh_edev_info(edev, "not actionable (%d,%d,%d)", !!edev->pdev, + eeh_edev_info(edev, "not actionable (%d,%d,%d)", !!pdev, !eeh_dev_removed(edev), !eeh_pe_passed(edev->pe)); } - device_unlock(&edev->pdev->dev); + device_unlock(&pdev->dev); + if (edev->pdev != pdev) + eeh_edev_warn(edev, "Device changed during processing!\n"); + put_device(&pdev->dev); } static void eeh_pe_report(const char *name, struct eeh_pe *root, @@ -337,20 +338,20 @@ static void eeh_pe_report(const char *name, struct eeh_pe *root, * Report an EEH error to each device driver. */ static enum pci_ers_result eeh_report_error(struct eeh_dev *edev, + struct pci_dev *pdev, struct pci_driver *driver) { enum pci_ers_result rc; - struct pci_dev *dev = edev->pdev; if (!driver->err_handler->error_detected) return PCI_ERS_RESULT_NONE; eeh_edev_info(edev, "Invoking %s->error_detected(IO frozen)", driver->name); - rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen); + rc = driver->err_handler->error_detected(pdev, pci_channel_io_frozen); edev->in_error = true; - pci_uevent_ers(dev, PCI_ERS_RESULT_NONE); + pci_uevent_ers(pdev, PCI_ERS_RESULT_NONE); return rc; } @@ -363,12 +364,13 @@ static enum pci_ers_result eeh_report_error(struct eeh_dev *edev, * are now enabled. */ static enum pci_ers_result eeh_report_mmio_enabled(struct eeh_dev *edev, + struct pci_dev *pdev, struct pci_driver *driver) { if (!driver->err_handler->mmio_enabled) return PCI_ERS_RESULT_NONE; eeh_edev_info(edev, "Invoking %s->mmio_enabled()", driver->name); - return driver->err_handler->mmio_enabled(edev->pdev); + return driver->err_handler->mmio_enabled(pdev); } /** @@ -382,20 +384,21 @@ static enum pci_ers_result eeh_report_mmio_enabled(struct eeh_dev *edev, * driver can work again while the device is recovered. 
*/ static enum pci_ers_result eeh_report_reset(struct eeh_dev *edev, + struct pci_dev *pdev, struct pci_driver *driver) { if (!driver->err_handler->slot_reset || !edev->in_error) return PCI_ERS_RESULT_NONE; eeh_edev_info(edev, "Invoking %s->slot_reset()", driver->name); - return driver->err_handler->slot_reset(edev->pdev); + return driver->err_handler->slot_reset(pdev); } -static void *eeh_dev_restore_state(struct eeh_dev *edev, void *userdata) +static void eeh_dev_restore_state(struct eeh_dev *edev, void *userdata) { struct pci_dev *pdev; if (!edev) - return NULL; + return; /* * The content in the config space isn't saved because @@ -407,15 +410,14 @@ static void *eeh_dev_restore_state(struct eeh_dev *edev, void *userdata) if (list_is_last(&edev->entry, &edev->pe->edevs)) eeh_pe_restore_bars(edev->pe); - return NULL; + return; } pdev = eeh_dev_to_pci_dev(edev); if (!pdev) - return NULL; + return; pci_restore_state(pdev); - return NULL; } /** @@ -428,13 +430,14 @@ static void *eeh_dev_restore_state(struct eeh_dev *edev, void *userdata) * to make the recovered device work again. */ static enum pci_ers_result eeh_report_resume(struct eeh_dev *edev, + struct pci_dev *pdev, struct pci_driver *driver) { if (!driver->err_handler->resume || !edev->in_error) return PCI_ERS_RESULT_NONE; eeh_edev_info(edev, "Invoking %s->resume()", driver->name); - driver->err_handler->resume(edev->pdev); + driver->err_handler->resume(pdev); pci_uevent_ers(edev->pdev, PCI_ERS_RESULT_RECOVERED); #ifdef CONFIG_PCI_IOV @@ -453,6 +456,7 @@ static enum pci_ers_result eeh_report_resume(struct eeh_dev *edev, * dead, and that no further recovery attempts will be made on it. */ static enum pci_ers_result eeh_report_failure(struct eeh_dev *edev, + struct pci_dev *pdev, struct pci_driver *driver) { enum pci_ers_result rc; @@ -462,10 +466,10 @@ static enum pci_ers_result eeh_report_failure(struct eeh_dev *edev, eeh_edev_info(edev, "Invoking %s->error_detected(permanent failure)", driver->name); - rc = driver->err_handler->error_detected(edev->pdev, + rc = driver->err_handler->error_detected(pdev, pci_channel_io_perm_failure); - pci_uevent_ers(edev->pdev, PCI_ERS_RESULT_DISCONNECT); + pci_uevent_ers(pdev, PCI_ERS_RESULT_DISCONNECT); return rc; } @@ -473,12 +477,9 @@ static void *eeh_add_virt_device(struct eeh_dev *edev) { struct pci_driver *driver; struct pci_dev *dev = eeh_dev_to_pci_dev(edev); - struct pci_dn *pdn = eeh_dev_to_pdn(edev); if (!(edev->physfn)) { - pr_warn("%s: EEH dev %04x:%02x:%02x.%01x not for VF\n", - __func__, pdn->phb->global_number, pdn->busno, - PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); + eeh_edev_warn(edev, "Not for VF\n"); return NULL; } @@ -492,12 +493,12 @@ static void *eeh_add_virt_device(struct eeh_dev *edev) } #ifdef CONFIG_PCI_IOV - pci_iov_add_virtfn(edev->physfn, pdn->vf_index); + pci_iov_add_virtfn(edev->physfn, eeh_dev_to_pdn(edev)->vf_index); #endif return NULL; } -static void *eeh_rmv_device(struct eeh_dev *edev, void *userdata) +static void eeh_rmv_device(struct eeh_dev *edev, void *userdata) { struct pci_driver *driver; struct pci_dev *dev = eeh_dev_to_pci_dev(edev); @@ -512,7 +513,7 @@ static void *eeh_rmv_device(struct eeh_dev *edev, void *userdata) */ if (!eeh_edev_actionable(edev) || (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)) - return NULL; + return; if (rmv_data) { driver = eeh_pcid_get(dev); @@ -521,7 +522,7 @@ static void *eeh_rmv_device(struct eeh_dev *edev, void *userdata) driver->err_handler->error_detected && driver->err_handler->slot_reset) { eeh_pcid_put(dev); - return 
NULL; + return; } eeh_pcid_put(dev); } @@ -554,8 +555,6 @@ static void *eeh_rmv_device(struct eeh_dev *edev, void *userdata) pci_stop_and_remove_bus_device(dev); pci_unlock_rescan_remove(); } - - return NULL; } static void *eeh_pe_detach_dev(struct eeh_pe *pe, void *userdata) @@ -744,6 +743,99 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, */ #define MAX_WAIT_FOR_RECOVERY 300 + + +/* Walks the PE tree after processing an event to remove any stale PEs. + * + * NB: This needs to be recursive to ensure the leaf PEs get removed + * before their parents do. Although this could be done iteratively, we + * don't, since the recursive form is easier to read and we need to + * guarantee the leaf nodes will be handled first. + */ +static void eeh_pe_cleanup(struct eeh_pe *pe) +{ + struct eeh_pe *child_pe, *tmp; + + list_for_each_entry_safe(child_pe, tmp, &pe->child_list, child) + eeh_pe_cleanup(child_pe); + + if (pe->state & EEH_PE_KEEP) + return; + + if (!(pe->state & EEH_PE_INVALID)) + return; + + if (list_empty(&pe->edevs) && list_empty(&pe->child_list)) { + list_del(&pe->child); + kfree(pe); + } +} + +/** + * eeh_slot_presence_check - Check if a device is still present in a slot + * @pdev: pci_dev to check + * + * This function may return a false positive if we can't determine the slot's + * presence state. This might happen for PCIe slots if the PE containing + * the upstream bridge is also frozen, or the bridge is part of the same PE + * as the device. + * + * This shouldn't happen often, but you might see it if you hotplug a PCIe + * switch. + */ +static bool eeh_slot_presence_check(struct pci_dev *pdev) +{ + const struct hotplug_slot_ops *ops; + struct pci_slot *slot; + u8 state; + int rc; + + if (!pdev) + return false; + + if (pdev->error_state == pci_channel_io_perm_failure) + return false; + + slot = pdev->slot; + if (!slot || !slot->hotplug) + return true; + + ops = slot->hotplug->ops; + if (!ops || !ops->get_adapter_status) + return true; + + /* set the attention indicator while we've got the slot ops */ + if (ops->set_attention_status) + ops->set_attention_status(slot->hotplug, 1); + + rc = ops->get_adapter_status(slot->hotplug, &state); + if (rc) + return true; + + return !!state; +} + +static void eeh_clear_slot_attention(struct pci_dev *pdev) +{ + const struct hotplug_slot_ops *ops; + struct pci_slot *slot; + + if (!pdev) + return; + + if (pdev->error_state == pci_channel_io_perm_failure) + return; + + slot = pdev->slot; + if (!slot || !slot->hotplug) + return; + + ops = slot->hotplug->ops; + if (!ops || !ops->set_attention_status) + return; + + ops->set_attention_status(slot->hotplug, 0); +} + /** * eeh_handle_normal_event - Handle EEH events on a specific PE * @pe: EEH PE - which should not be used after we return, as it may @@ -774,6 +866,7 @@ void eeh_handle_normal_event(struct eeh_pe *pe) enum pci_ers_result result = PCI_ERS_RESULT_NONE; struct eeh_rmv_data rmv_data = {LIST_HEAD_INIT(rmv_data.removed_vf_list), 0}; + int devices = 0; bus = eeh_pe_bus_get(pe); if (!bus) { @@ -782,7 +875,59 @@ void eeh_handle_normal_event(struct eeh_pe *pe) return; } - eeh_pe_state_mark(pe, EEH_PE_RECOVERING); + /* + * When devices are hot-removed we might get an EEH due to + * a driver attempting to touch the MMIO space of a removed + * device. In this case we don't have a device to recover + * so suppress the event if we can't find any present devices. + * + * The hotplug driver should take care of tearing down the + * device itself.
+ */ + eeh_for_each_pe(pe, tmp_pe) + eeh_pe_for_each_dev(tmp_pe, edev, tmp) + if (eeh_slot_presence_check(edev->pdev)) + devices++; + + if (!devices) { + pr_debug("EEH: Frozen PHB#%x-PE#%x is empty!\n", + pe->phb->global_number, pe->addr); + goto out; /* nothing to recover */ + } + + /* Log the event */ + if (pe->type & EEH_PE_PHB) { + pr_err("EEH: PHB#%x failure detected, location: %s\n", + pe->phb->global_number, eeh_pe_loc_get(pe)); + } else { + struct eeh_pe *phb_pe = eeh_phb_pe_get(pe->phb); + + pr_err("EEH: Frozen PHB#%x-PE#%x detected\n", + pe->phb->global_number, pe->addr); + pr_err("EEH: PE location: %s, PHB location: %s\n", + eeh_pe_loc_get(pe), eeh_pe_loc_get(phb_pe)); + } + +#ifdef CONFIG_STACKTRACE + /* + * Print the saved stack trace now that we've verified there's + * something to recover. + */ + if (pe->trace_entries) { + void **ptrs = (void **) pe->stack_trace; + int i; + + pr_err("EEH: Frozen PHB#%x-PE#%x detected\n", + pe->phb->global_number, pe->addr); + + /* FIXME: Use the same format as dump_stack() */ + pr_err("EEH: Call Trace:\n"); + for (i = 0; i < pe->trace_entries; i++) + pr_err("EEH: [%pK] %pS\n", ptrs[i], ptrs[i]); + + pe->trace_entries = 0; + } +#endif /* CONFIG_STACKTRACE */ eeh_pe_update_time_stamp(pe); pe->freeze_count++; @@ -793,6 +938,10 @@ void eeh_handle_normal_event(struct eeh_pe *pe) result = PCI_ERS_RESULT_DISCONNECT; } + eeh_for_each_pe(pe, tmp_pe) + eeh_pe_for_each_dev(tmp_pe, edev, tmp) + edev->mode &= ~EEH_DEV_NO_HANDLER; + /* Walk the various device drivers attached to this slot through * a reset sequence, giving each an opportunity to do what it needs * to accomplish the reset. Each child gets a report of the @@ -969,6 +1118,19 @@ void eeh_handle_normal_event(struct eeh_pe *pe) return; } } + +out: + /* + * Clean up any PEs without devices. While marked as EEH_PE_RECOVERYING + * we don't want to modify the PE tree structure so we do it here. 
+ */ + eeh_pe_cleanup(pe); + + /* clear the slot attention LED for all recovered devices */ + eeh_for_each_pe(pe, tmp_pe) + eeh_pe_for_each_dev(tmp_pe, edev, tmp) + eeh_clear_slot_attention(edev->pdev); + eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true); } @@ -981,7 +1143,8 @@ void eeh_handle_normal_event(struct eeh_pe *pe) */ void eeh_handle_special_event(void) { - struct eeh_pe *pe, *phb_pe; + struct eeh_pe *pe, *phb_pe, *tmp_pe; + struct eeh_dev *edev, *tmp_edev; struct pci_bus *bus; struct pci_controller *hose; unsigned long flags; @@ -1040,6 +1203,7 @@ void eeh_handle_special_event(void) */ if (rc == EEH_NEXT_ERR_FROZEN_PE || rc == EEH_NEXT_ERR_FENCED_PHB) { + eeh_pe_state_mark(pe, EEH_PE_RECOVERING); eeh_handle_normal_event(pe); } else { pci_lock_rescan_remove(); @@ -1050,6 +1214,10 @@ void eeh_handle_special_event(void) (phb_pe->state & EEH_PE_RECOVERING)) continue; + eeh_for_each_pe(pe, tmp_pe) + eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev) + edev->mode &= ~EEH_DEV_NO_HANDLER; + /* Notify all devices to be down */ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true); eeh_set_channel_state(pe, pci_channel_io_perm_failure); diff --git a/arch/powerpc/kernel/eeh_event.c b/arch/powerpc/kernel/eeh_event.c index 64cfbe41174b27..a7a8dc182efb98 100644 --- a/arch/powerpc/kernel/eeh_event.c +++ b/arch/powerpc/kernel/eeh_event.c @@ -40,7 +40,6 @@ static int eeh_event_handler(void * dummy) { unsigned long flags; struct eeh_event *event; - struct eeh_pe *pe; while (!kthread_should_stop()) { if (wait_for_completion_interruptible(&eeh_eventlist_event)) @@ -59,19 +58,10 @@ static int eeh_event_handler(void * dummy) continue; /* We might have event without binding PE */ - pe = event->pe; - if (pe) { - if (pe->type & EEH_PE_PHB) - pr_info("EEH: Detected error on PHB#%x\n", - pe->phb->global_number); - else - pr_info("EEH: Detected PCI bus error on " - "PHB#%x-PE#%x\n", - pe->phb->global_number, pe->addr); - eeh_handle_normal_event(pe); - } else { + if (event->pe) + eeh_handle_normal_event(event->pe); + else eeh_handle_special_event(); - } kfree(event); } @@ -121,6 +111,24 @@ int __eeh_send_failure_event(struct eeh_pe *pe) } event->pe = pe; + /* + * Mark the PE as recovering before inserting it in the queue. + * This prevents the PE from being free()ed by a hotplug driver + * while the PE is sitting in the event queue. + */ + if (pe) { +#ifdef CONFIG_STACKTRACE + /* + * Save the current stack trace so we can dump it from the + * event handler thread. + */ + pe->trace_entries = stack_trace_save(pe->stack_trace, + ARRAY_SIZE(pe->stack_trace), 0); +#endif /* CONFIG_STACKTRACE */ + + eeh_pe_state_mark(pe, EEH_PE_RECOVERING); + } + /* We may or may not be called in an interrupt context */ spin_lock_irqsave(&eeh_eventlist_lock, flags); list_add(&event->list, &eeh_eventlist); diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index 854cef7b18f4db..177852e39a2534 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c @@ -231,29 +231,22 @@ void *eeh_pe_traverse(struct eeh_pe *root, * The function is used to traverse the devices of the specified * PE and its child PEs. 
*/ -void *eeh_pe_dev_traverse(struct eeh_pe *root, +void eeh_pe_dev_traverse(struct eeh_pe *root, eeh_edev_traverse_func fn, void *flag) { struct eeh_pe *pe; struct eeh_dev *edev, *tmp; - void *ret; if (!root) { pr_warn("%s: Invalid PE %p\n", __func__, root); - return NULL; + return; } /* Traverse root PE */ - eeh_for_each_pe(root, pe) { - eeh_pe_for_each_dev(pe, edev, tmp) { - ret = fn(edev, flag); - if (ret) - return ret; - } - } - - return NULL; + eeh_for_each_pe(root, pe) + eeh_pe_for_each_dev(pe, edev, tmp) + fn(edev, flag); } /** @@ -379,8 +372,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) /* Check if the PE number is valid */ if (!eeh_has_flag(EEH_VALID_PE_ZERO) && !edev->pe_config_addr) { - pr_err("%s: Invalid PE#0 for edev 0x%x on PHB#%x\n", - __func__, config_addr, pdn->phb->global_number); + eeh_edev_err(edev, "PE#0 is invalid for this PHB!\n"); return -EINVAL; } @@ -391,42 +383,34 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) * components. */ pe = eeh_pe_get(pdn->phb, edev->pe_config_addr, config_addr); - if (pe && !(pe->type & EEH_PE_INVALID)) { - /* Mark the PE as type of PCI bus */ - pe->type = EEH_PE_BUS; - edev->pe = pe; - - /* Put the edev to PE */ - list_add_tail(&edev->entry, &pe->edevs); - pr_debug("EEH: Add %04x:%02x:%02x.%01x to Bus PE#%x\n", - pdn->phb->global_number, - pdn->busno, - PCI_SLOT(pdn->devfn), - PCI_FUNC(pdn->devfn), - pe->addr); - return 0; - } else if (pe && (pe->type & EEH_PE_INVALID)) { - list_add_tail(&edev->entry, &pe->edevs); - edev->pe = pe; - /* - * We're running to here because of PCI hotplug caused by - * EEH recovery. We need clear EEH_PE_INVALID until the top. - */ - parent = pe; - while (parent) { - if (!(parent->type & EEH_PE_INVALID)) - break; - parent->type &= ~EEH_PE_INVALID; - parent = parent->parent; - } + if (pe) { + if (pe->type & EEH_PE_INVALID) { + list_add_tail(&edev->entry, &pe->edevs); + edev->pe = pe; + /* + * We're running to here because of PCI hotplug caused by + * EEH recovery. We need clear EEH_PE_INVALID until the top. 
+ */ + parent = pe; + while (parent) { + if (!(parent->type & EEH_PE_INVALID)) + break; + parent->type &= ~EEH_PE_INVALID; + parent = parent->parent; + } + + eeh_edev_dbg(edev, + "Added to device PE (parent: PE#%x)\n", + pe->parent->addr); + } else { + /* Mark the PE as type of PCI bus */ + pe->type = EEH_PE_BUS; + edev->pe = pe; - pr_debug("EEH: Add %04x:%02x:%02x.%01x to Device " - "PE#%x, Parent PE#%x\n", - pdn->phb->global_number, - pdn->busno, - PCI_SLOT(pdn->devfn), - PCI_FUNC(pdn->devfn), - pe->addr, pe->parent->addr); + /* Put the edev to PE */ + list_add_tail(&edev->entry, &pe->edevs); + eeh_edev_dbg(edev, "Added to bus PE\n"); + } return 0; } @@ -468,13 +452,8 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) list_add_tail(&pe->child, &parent->child_list); list_add_tail(&edev->entry, &pe->edevs); edev->pe = pe; - pr_debug("EEH: Add %04x:%02x:%02x.%01x to " - "Device PE#%x, Parent PE#%x\n", - pdn->phb->global_number, - pdn->busno, - PCI_SLOT(pdn->devfn), - PCI_FUNC(pdn->devfn), - pe->addr, pe->parent->addr); + eeh_edev_dbg(edev, "Added to device PE (parent: PE#%x)\n", + pe->parent->addr); return 0; } @@ -491,16 +470,12 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) int eeh_rmv_from_parent_pe(struct eeh_dev *edev) { struct eeh_pe *pe, *parent, *child; + bool keep, recover; int cnt; - struct pci_dn *pdn = eeh_dev_to_pdn(edev); pe = eeh_dev_to_pe(edev); if (!pe) { - pr_debug("%s: No PE found for device %04x:%02x:%02x.%01x\n", - __func__, pdn->phb->global_number, - pdn->busno, - PCI_SLOT(pdn->devfn), - PCI_FUNC(pdn->devfn)); + eeh_edev_dbg(edev, "No PE found for device.\n"); return -EEXIST; } @@ -516,10 +491,21 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev) */ while (1) { parent = pe->parent; + + /* PHB PEs should never be removed */ if (pe->type & EEH_PE_PHB) break; - if (!(pe->state & EEH_PE_KEEP)) { + /* + * XXX: KEEP is set while resetting a PE. I don't think it's + * ever set without RECOVERING also being set. I could + * be wrong though so catch that with a WARN. + */ + keep = !!(pe->state & EEH_PE_KEEP); + recover = !!(pe->state & EEH_PE_RECOVERING); + WARN_ON(keep && !recover); + + if (!keep && !recover) { if (list_empty(&pe->edevs) && list_empty(&pe->child_list)) { list_del(&pe->child); @@ -528,6 +514,15 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev) break; } } else { + /* + * Mark the PE as invalid. At the end of the recovery + * process any invalid PEs will be garbage collected. + * + * We need to delay the free()ing of them since we can + * remove edev's while traversing the PE tree which + * might trigger the removal of a PE and we can't + * deal with that (yet). 
+ */ if (list_empty(&pe->edevs)) { cnt = 0; list_for_each_entry(child, &pe->child_list, child) { @@ -623,13 +618,11 @@ void eeh_pe_mark_isolated(struct eeh_pe *root) } EXPORT_SYMBOL_GPL(eeh_pe_mark_isolated); -static void *__eeh_pe_dev_mode_mark(struct eeh_dev *edev, void *flag) +static void __eeh_pe_dev_mode_mark(struct eeh_dev *edev, void *flag) { int mode = *((int *)flag); edev->mode |= mode; - - return NULL; } /** @@ -717,17 +710,13 @@ static void eeh_bridge_check_link(struct eeh_dev *edev) if (!(edev->mode & (EEH_DEV_ROOT_PORT | EEH_DEV_DS_PORT))) return; - pr_debug("%s: Check PCIe link for %04x:%02x:%02x.%01x ...\n", - __func__, pdn->phb->global_number, - pdn->busno, - PCI_SLOT(pdn->devfn), - PCI_FUNC(pdn->devfn)); + eeh_edev_dbg(edev, "Checking PCIe link...\n"); /* Check slot status */ cap = edev->pcie_cap; eeh_ops->read_config(pdn, cap + PCI_EXP_SLTSTA, 2, &val); if (!(val & PCI_EXP_SLTSTA_PDS)) { - pr_debug(" No card in the slot (0x%04x) !\n", val); + eeh_edev_dbg(edev, "No card in the slot (0x%04x) !\n", val); return; } @@ -736,7 +725,7 @@ static void eeh_bridge_check_link(struct eeh_dev *edev) if (val & PCI_EXP_SLTCAP_PCP) { eeh_ops->read_config(pdn, cap + PCI_EXP_SLTCTL, 2, &val); if (val & PCI_EXP_SLTCTL_PCC) { - pr_debug(" In power-off state, power it on ...\n"); + eeh_edev_dbg(edev, "In power-off state, power it on ...\n"); val &= ~(PCI_EXP_SLTCTL_PCC | PCI_EXP_SLTCTL_PIC); val |= (0x0100 & PCI_EXP_SLTCTL_PIC); eeh_ops->write_config(pdn, cap + PCI_EXP_SLTCTL, 2, val); @@ -752,7 +741,7 @@ static void eeh_bridge_check_link(struct eeh_dev *edev) /* Check link */ eeh_ops->read_config(pdn, cap + PCI_EXP_LNKCAP, 4, &val); if (!(val & PCI_EXP_LNKCAP_DLLLARC)) { - pr_debug(" No link reporting capability (0x%08x) \n", val); + eeh_edev_dbg(edev, "No link reporting capability (0x%08x) \n", val); msleep(1000); return; } @@ -769,10 +758,10 @@ static void eeh_bridge_check_link(struct eeh_dev *edev) } if (val & PCI_EXP_LNKSTA_DLLLA) - pr_debug(" Link up (%s)\n", + eeh_edev_dbg(edev, "Link up (%s)\n", (val & PCI_EXP_LNKSTA_CLS_2_5GB) ? "2.5GB" : "5GB"); else - pr_debug(" Link not ready (0x%04x)\n", val); + eeh_edev_dbg(edev, "Link not ready (0x%04x)\n", val); } #define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF)) @@ -852,7 +841,7 @@ static void eeh_restore_device_bars(struct eeh_dev *edev) * the expansion ROM base address, the latency timer, and etc. * from the saved values in the device node. 
*/ -static void *eeh_restore_one_device_bars(struct eeh_dev *edev, void *flag) +static void eeh_restore_one_device_bars(struct eeh_dev *edev, void *flag) { struct pci_dn *pdn = eeh_dev_to_pdn(edev); @@ -864,8 +853,6 @@ static void *eeh_restore_one_device_bars(struct eeh_dev *edev, void *flag) if (eeh_ops->restore_config && pdn) eeh_ops->restore_config(pdn); - - return NULL; } /** diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 54fab22c9a4355..d60908ea37fb9b 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -230,7 +230,7 @@ transfer_to_handler_cont: */ lis r12,reenable_mmu@h ori r12,r12,reenable_mmu@l - LOAD_MSR_KERNEL(r0, MSR_KERNEL) + LOAD_REG_IMMEDIATE(r0, MSR_KERNEL) mtspr SPRN_SRR0,r12 mtspr SPRN_SRR1,r0 SYNC @@ -304,7 +304,7 @@ stack_ovf: addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD lis r9,StackOverflow@ha addi r9,r9,StackOverflow@l - LOAD_MSR_KERNEL(r10,MSR_KERNEL) + LOAD_REG_IMMEDIATE(r10,MSR_KERNEL) #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS) mtspr SPRN_NRI, r0 #endif @@ -324,7 +324,7 @@ trace_syscall_entry_irq_off: bl trace_hardirqs_on /* Now enable for real */ - LOAD_MSR_KERNEL(r10, MSR_KERNEL | MSR_EE) + LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE) mtmsr r10 REST_GPR(0, r1) @@ -394,7 +394,7 @@ ret_from_syscall: #endif mr r6,r3 /* disable interrupts so current_thread_info()->flags can't change */ - LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */ + LOAD_REG_IMMEDIATE(r10,MSR_KERNEL) /* doesn't include MSR_EE */ /* Note: We don't bother telling lockdep about it */ SYNC MTMSRD(r10) @@ -777,11 +777,19 @@ fast_exception_return: 1: lis r3,exc_exit_restart_end@ha addi r3,r3,exc_exit_restart_end@l cmplw r12,r3 +#ifdef CONFIG_PPC_BOOK3S_601 + bge 2b +#else bge 3f +#endif lis r4,exc_exit_restart@ha addi r4,r4,exc_exit_restart@l cmplw r12,r4 +#ifdef CONFIG_PPC_BOOK3S_601 + blt 2b +#else blt 3f +#endif lis r3,fee_restarts@ha tophys(r3,r3) lwz r5,fee_restarts@l(r3) @@ -800,9 +808,6 @@ fee_restarts: /* aargh, we don't know which trap this is */ /* but the 601 doesn't implement the RI bit, so assume it's OK */ 3: -BEGIN_FTR_SECTION - b 2b -END_FTR_SECTION_IFSET(CPU_FTR_601) li r10,-1 stw r10,_TRAP(r11) addi r3,r1,STACK_FRAME_OVERHEAD @@ -824,7 +829,7 @@ ret_from_except: * can't change between when we test it and when we return * from the interrupt. */ /* Note: We don't bother telling lockdep about it */ - LOAD_MSR_KERNEL(r10,MSR_KERNEL) + LOAD_REG_IMMEDIATE(r10,MSR_KERNEL) SYNC /* Some chip revs have problems here... */ MTMSRD(r10) /* disable interrupts */ @@ -991,7 +996,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) * can restart the exception exit path at the label * exc_exit_restart below. -- paulus */ - LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI) + LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI) SYNC MTMSRD(r10) /* clear the RI bit */ .globl exc_exit_restart @@ -1066,7 +1071,7 @@ exc_exit_restart_end: REST_NVGPRS(r1); \ lwz r3,_MSR(r1); \ andi. r3,r3,MSR_PR; \ - LOAD_MSR_KERNEL(r10,MSR_KERNEL); \ + LOAD_REG_IMMEDIATE(r10,MSR_KERNEL); \ bne user_exc_return; \ lwz r0,GPR0(r1); \ lwz r2,GPR2(r1); \ @@ -1236,7 +1241,7 @@ recheck: * neither. Those disable/enable cycles used to peek at * TI_FLAGS aren't advertised.
*/ - LOAD_MSR_KERNEL(r10,MSR_KERNEL) + LOAD_REG_IMMEDIATE(r10,MSR_KERNEL) SYNC MTMSRD(r10) /* disable interrupts */ lwz r9,TI_FLAGS(r2) @@ -1270,11 +1275,19 @@ nonrecoverable: lis r10,exc_exit_restart_end@ha addi r10,r10,exc_exit_restart_end@l cmplw r12,r10 +#ifdef CONFIG_PPC_BOOK3S_601 + bgelr +#else bge 3f +#endif lis r11,exc_exit_restart@ha addi r11,r11,exc_exit_restart@l cmplw r12,r11 +#ifdef CONFIG_PPC_BOOK3S_601 + bltlr +#else blt 3f +#endif lis r10,ee_restarts@ha lwz r12,ee_restarts@l(r10) addi r12,r12,1 @@ -1283,9 +1296,6 @@ nonrecoverable: blr 3: /* OK, we can't recover, kill this process */ /* but the 601 doesn't implement the RI bit, so assume it's OK */ -BEGIN_FTR_SECTION - blr -END_FTR_SECTION_IFSET(CPU_FTR_601) lwz r3,_TRAP(r1) andi. r0,r3,1 beq 5f @@ -1329,7 +1339,7 @@ _GLOBAL(enter_rtas) lwz r4,RTASBASE(r4) mfmsr r9 stw r9,8(r1) - LOAD_MSR_KERNEL(r0,MSR_KERNEL) + LOAD_REG_IMMEDIATE(r0,MSR_KERNEL) SYNC /* disable interrupts so SRR0/1 */ MTMSRD(r0) /* don't get trashed */ li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR) diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 0a0b5310f54a6f..6467bdab8d405d 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -69,24 +69,20 @@ BEGIN_FTR_SECTION bne .Ltabort_syscall END_FTR_SECTION_IFSET(CPU_FTR_TM) #endif - andi. r10,r12,MSR_PR mr r10,r1 - addi r1,r1,-INT_FRAME_SIZE - beq- 1f ld r1,PACAKSAVE(r13) -1: std r10,0(r1) + std r10,0(r1) std r11,_NIP(r1) std r12,_MSR(r1) std r0,GPR0(r1) std r10,GPR1(r1) - beq 2f /* if from kernel mode */ #ifdef CONFIG_PPC_FSL_BOOK3E START_BTB_FLUSH_SECTION BTB_FLUSH(r10) END_BTB_FLUSH_SECTION #endif ACCOUNT_CPU_USER_ENTRY(r13, r10, r11) -2: std r2,GPR2(r1) + std r2,GPR2(r1) std r3,GPR3(r1) mfcr r2 std r4,GPR4(r1) @@ -122,14 +118,13 @@ END_BTB_FLUSH_SECTION #if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR) BEGIN_FW_FTR_SECTION - beq 33f - /* if from user, see if there are any DTL entries to process */ + /* see if there are any DTL entries to process */ ld r10,PACALPPACAPTR(r13) /* get ptr to VPA */ ld r11,PACA_DTL_RIDX(r13) /* get log read index */ addi r10,r10,LPPACA_DTLIDX LDX_BE r10,0,r10 /* get log write index */ - cmpd cr1,r11,r10 - beq+ cr1,33f + cmpd r11,r10 + beq+ 33f bl accumulate_stolen_time REST_GPR(0,r1) REST_4GPRS(3,r1) @@ -203,6 +198,7 @@ system_call: /* label this so stack traces look sane */ mtctr r12 bctrl /* Call handler */ + /* syscall_exit can exit to kernel mode, via ret_from_kernel_thread */ .Lsyscall_exit: std r3,RESULT(r1) @@ -216,11 +212,6 @@ system_call: /* label this so stack traces look sane */ ld r12, PACA_THREAD_INFO(r13) ld r8,_MSR(r1) -#ifdef CONFIG_PPC_BOOK3S - /* No MSR:RI on BookE */ - andi. 
r10,r8,MSR_RI - beq- .Lunrecov_restore -#endif /* * This is a few instructions into the actual syscall exit path (which actually diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 1cfb3da4a84a2b..829950b96d2912 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -750,12 +750,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) ld r15,PACATOC(r13) ld r14,interrupt_base_book3e@got(r15) ld r15,__end_interrupts@got(r15) -#else - LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) - LOAD_REG_IMMEDIATE(r15,__end_interrupts) -#endif cmpld cr0,r10,r14 cmpld cr1,r10,r15 +#else + LOAD_REG_IMMEDIATE_SYM(r14, r15, interrupt_base_book3e) + cmpld cr0, r10, r14 + LOAD_REG_IMMEDIATE_SYM(r14, r15, __end_interrupts) + cmpld cr1, r10, r14 +#endif blt+ cr0,1f bge+ cr1,1f @@ -820,12 +822,14 @@ kernel_dbg_exc: ld r15,PACATOC(r13) ld r14,interrupt_base_book3e@got(r15) ld r15,__end_interrupts@got(r15) -#else - LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) - LOAD_REG_IMMEDIATE(r15,__end_interrupts) -#endif cmpld cr0,r10,r14 cmpld cr1,r10,r15 +#else + LOAD_REG_IMMEDIATE_SYM(r14, r15, interrupt_base_book3e) + cmpld cr0, r10, r14 + LOAD_REG_IMMEDIATE_SYM(r14, r15,__end_interrupts) + cmpld cr1, r10, r14 +#endif blt+ cr0,1f bge+ cr1,1f @@ -1449,7 +1453,7 @@ a2_tlbinit_code_start: a2_tlbinit_after_linear_map: /* Now we branch the new virtual address mapped by this entry */ - LOAD_REG_IMMEDIATE(r3,1f) + LOAD_REG_IMMEDIATE_SYM(r3, r5, 1f) mtctr r3 bctr diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 6ba3cc2ef8abc9..d0018dd17e0a63 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -43,6 +43,58 @@ .endif #endif +/* + * Following are fixed section helper macros. + * + * EXC_REAL_BEGIN/END - real, unrelocated exception vectors + * EXC_VIRT_BEGIN/END - virt (AIL), unrelocated exception vectors + * TRAMP_REAL_BEGIN - real, unrelocated helpers (virt may call these) + * TRAMP_VIRT_BEGIN - virt, unreloc helpers (in practice, real can use) + * TRAMP_KVM_BEGIN - KVM handlers, these are put into real, unrelocated + * EXC_COMMON - After switching to virtual, relocated mode. 
+ */ + +#define EXC_REAL_BEGIN(name, start, size) \ + FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##name, start, size) + +#define EXC_REAL_END(name, start, size) \ + FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##name, start, size) + +#define EXC_VIRT_BEGIN(name, start, size) \ + FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size) + +#define EXC_VIRT_END(name, start, size) \ + FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size) + +#define EXC_COMMON_BEGIN(name) \ + USE_TEXT_SECTION(); \ + .balign IFETCH_ALIGN_BYTES; \ + .global name; \ + _ASM_NOKPROBE_SYMBOL(name); \ + DEFINE_FIXED_SYMBOL(name); \ +name: + +#define TRAMP_REAL_BEGIN(name) \ + FIXED_SECTION_ENTRY_BEGIN(real_trampolines, name) + +#define TRAMP_VIRT_BEGIN(name) \ + FIXED_SECTION_ENTRY_BEGIN(virt_trampolines, name) + +#ifdef CONFIG_KVM_BOOK3S_64_HANDLER +#define TRAMP_KVM_BEGIN(name) \ + TRAMP_VIRT_BEGIN(name) +#else +#define TRAMP_KVM_BEGIN(name) +#endif + +#define EXC_REAL_NONE(start, size) \ + FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##unused, start, size); \ + FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##unused, start, size) + +#define EXC_VIRT_NONE(start, size) \ + FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size); \ + FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size) + /* * We're short on space and time in the exception prolog, so we can't * use the normal LOAD_REG_IMMEDIATE macro to load the address of label. @@ -68,6 +120,7 @@ addis reg,reg,(ABS_ADDR(label))@h /* Exception register prefixes */ +#define EXC_HV_OR_STD 2 /* depends on HVMODE */ #define EXC_HV 1 #define EXC_STD 0 @@ -127,126 +180,6 @@ BEGIN_FTR_SECTION_NESTED(943) \ std ra,offset(r13); \ END_FTR_SECTION_NESTED(ftr,ftr,943) -.macro EXCEPTION_PROLOG_0 area - SET_SCRATCH0(r13) /* save r13 */ - GET_PACA(r13) - std r9,\area\()+EX_R9(r13) /* save r9 */ - OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR) - HMT_MEDIUM - std r10,\area\()+EX_R10(r13) /* save r10 - r12 */ - OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR) -.endm - -.macro EXCEPTION_PROLOG_1 hsrr, area, kvm, vec, dar, dsisr, bitmask - OPT_SAVE_REG_TO_PACA(\area\()+EX_PPR, r9, CPU_FTR_HAS_PPR) - OPT_SAVE_REG_TO_PACA(\area\()+EX_CFAR, r10, CPU_FTR_CFAR) - INTERRUPT_TO_KERNEL - SAVE_CTR(r10, \area\()) - mfcr r9 - .if \kvm - KVMTEST \hsrr \vec - .endif - .if \bitmask - lbz r10,PACAIRQSOFTMASK(r13) - andi. r10,r10,\bitmask - /* Associate vector numbers with bits in paca->irq_happened */ - .if \vec == 0x500 || \vec == 0xea0 - li r10,PACA_IRQ_EE - .elseif \vec == 0x900 - li r10,PACA_IRQ_DEC - .elseif \vec == 0xa00 || \vec == 0xe80 - li r10,PACA_IRQ_DBELL - .elseif \vec == 0xe60 - li r10,PACA_IRQ_HMI - .elseif \vec == 0xf00 - li r10,PACA_IRQ_PMI - .else - .abort "Bad maskable vector" - .endif - - .if \hsrr - bne masked_Hinterrupt - .else - bne masked_interrupt - .endif - .endif - - std r11,\area\()+EX_R11(r13) - std r12,\area\()+EX_R12(r13) - - /* - * DAR/DSISR, SCRATCH0 must be read before setting MSR[RI], - * because a d-side MCE will clobber those registers so is - * not recoverable if they are live. 
- */ - GET_SCRATCH0(r10) - std r10,\area\()+EX_R13(r13) - .if \dar - mfspr r10,SPRN_DAR - std r10,\area\()+EX_DAR(r13) - .endif - .if \dsisr - mfspr r10,SPRN_DSISR - stw r10,\area\()+EX_DSISR(r13) - .endif -.endm - -.macro EXCEPTION_PROLOG_2_REAL label, hsrr, set_ri - ld r10,PACAKMSR(r13) /* get MSR value for kernel */ - .if ! \set_ri - xori r10,r10,MSR_RI /* Clear MSR_RI */ - .endif - .if \hsrr - mfspr r11,SPRN_HSRR0 /* save HSRR0 */ - mfspr r12,SPRN_HSRR1 /* and HSRR1 */ - mtspr SPRN_HSRR1,r10 - .else - mfspr r11,SPRN_SRR0 /* save SRR0 */ - mfspr r12,SPRN_SRR1 /* and SRR1 */ - mtspr SPRN_SRR1,r10 - .endif - LOAD_HANDLER(r10, \label\()) - .if \hsrr - mtspr SPRN_HSRR0,r10 - HRFI_TO_KERNEL - .else - mtspr SPRN_SRR0,r10 - RFI_TO_KERNEL - .endif - b . /* prevent speculative execution */ -.endm - -.macro EXCEPTION_PROLOG_2_VIRT label, hsrr -#ifdef CONFIG_RELOCATABLE - .if \hsrr - mfspr r11,SPRN_HSRR0 /* save HSRR0 */ - .else - mfspr r11,SPRN_SRR0 /* save SRR0 */ - .endif - LOAD_HANDLER(r12, \label\()) - mtctr r12 - .if \hsrr - mfspr r12,SPRN_HSRR1 /* and HSRR1 */ - .else - mfspr r12,SPRN_SRR1 /* and HSRR1 */ - .endif - li r10,MSR_RI - mtmsrd r10,1 /* Set RI (EE=0) */ - bctr -#else - .if \hsrr - mfspr r11,SPRN_HSRR0 /* save HSRR0 */ - mfspr r12,SPRN_HSRR1 /* and HSRR1 */ - .else - mfspr r11,SPRN_SRR0 /* save SRR0 */ - mfspr r12,SPRN_SRR1 /* and SRR1 */ - .endif - li r10,MSR_RI - mtmsrd r10,1 /* Set RI (EE=0) */ - b \label -#endif -.endm - /* * Branch to label using its 0xC000 address. This results in instruction * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned @@ -260,6 +193,11 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) mtctr reg; \ bctr +.macro INT_KVM_HANDLER name, vec, hsrr, area, skip + TRAMP_KVM_BEGIN(\name\()_kvm) + KVM_HANDLER \vec, \hsrr, \area, \skip +.endm + #ifdef CONFIG_KVM_BOOK3S_64_HANDLER #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE /* @@ -272,17 +210,13 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) #define kvmppc_interrupt kvmppc_interrupt_pr #endif -.macro KVMTEST hsrr, n +.macro KVMTEST name, hsrr, n lbz r10,HSTATE_IN_GUEST(r13) cmpwi r10,0 - .if \hsrr - bne do_kvm_H\n - .else - bne do_kvm_\n - .endif + bne \name\()_kvm .endm -.macro KVM_HANDLER area, hsrr, n, skip +.macro KVM_HANDLER vec, hsrr, area, skip .if \skip cmpwi r10,KVM_GUEST_MODE_SKIP beq 89f @@ -301,10 +235,16 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948) std r12,HSTATE_SCRATCH0(r13) sldi r12,r9,32 /* HSRR variants have the 0x2 bit added to their trap number */ - .if \hsrr - ori r12,r12,(\n + 0x2) + .if \hsrr == EXC_HV_OR_STD + BEGIN_FTR_SECTION + ori r12,r12,(\vec + 0x2) + FTR_SECTION_ELSE + ori r12,r12,(\vec) + ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) + .elseif \hsrr + ori r12,r12,(\vec + 0x2) .else - ori r12,r12,(\n) + ori r12,r12,(\vec) .endif #ifdef CONFIG_RELOCATABLE @@ -329,7 +269,13 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948) 89: mtocrf 0x80,r9 ld r9,\area+EX_R9(r13) ld r10,\area+EX_R10(r13) - .if \hsrr + .if \hsrr == EXC_HV_OR_STD + BEGIN_FTR_SECTION + b kvmppc_skip_Hinterrupt + FTR_SECTION_ELSE + b kvmppc_skip_interrupt + ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) + .elseif \hsrr b kvmppc_skip_Hinterrupt .else b kvmppc_skip_interrupt @@ -338,88 +284,328 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948) .endm #else -.macro KVMTEST hsrr, n +.macro KVMTEST name, hsrr, n .endm -.macro KVM_HANDLER area, hsrr, n, skip +.macro KVM_HANDLER name, vec, hsrr, area, skip +.endm +#endif + +.macro INT_SAVE_SRR_AND_JUMP label, 
hsrr, set_ri + ld r10,PACAKMSR(r13) /* get MSR value for kernel */ + .if ! \set_ri + xori r10,r10,MSR_RI /* Clear MSR_RI */ + .endif + .if \hsrr == EXC_HV_OR_STD + BEGIN_FTR_SECTION + mfspr r11,SPRN_HSRR0 /* save HSRR0 */ + mfspr r12,SPRN_HSRR1 /* and HSRR1 */ + mtspr SPRN_HSRR1,r10 + FTR_SECTION_ELSE + mfspr r11,SPRN_SRR0 /* save SRR0 */ + mfspr r12,SPRN_SRR1 /* and SRR1 */ + mtspr SPRN_SRR1,r10 + ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) + .elseif \hsrr + mfspr r11,SPRN_HSRR0 /* save HSRR0 */ + mfspr r12,SPRN_HSRR1 /* and HSRR1 */ + mtspr SPRN_HSRR1,r10 + .else + mfspr r11,SPRN_SRR0 /* save SRR0 */ + mfspr r12,SPRN_SRR1 /* and SRR1 */ + mtspr SPRN_SRR1,r10 + .endif + LOAD_HANDLER(r10, \label\()) + .if \hsrr == EXC_HV_OR_STD + BEGIN_FTR_SECTION + mtspr SPRN_HSRR0,r10 + HRFI_TO_KERNEL + FTR_SECTION_ELSE + mtspr SPRN_SRR0,r10 + RFI_TO_KERNEL + ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) + .elseif \hsrr + mtspr SPRN_HSRR0,r10 + HRFI_TO_KERNEL + .else + mtspr SPRN_SRR0,r10 + RFI_TO_KERNEL + .endif + b . /* prevent speculative execution */ .endm + +/* INT_SAVE_SRR_AND_JUMP works for real or virt, this is faster but virt only */ +.macro INT_VIRT_SAVE_SRR_AND_JUMP label, hsrr +#ifdef CONFIG_RELOCATABLE + .if \hsrr == EXC_HV_OR_STD + BEGIN_FTR_SECTION + mfspr r11,SPRN_HSRR0 /* save HSRR0 */ + FTR_SECTION_ELSE + mfspr r11,SPRN_SRR0 /* save SRR0 */ + ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) + .elseif \hsrr + mfspr r11,SPRN_HSRR0 /* save HSRR0 */ + .else + mfspr r11,SPRN_SRR0 /* save SRR0 */ + .endif + LOAD_HANDLER(r12, \label\()) + mtctr r12 + .if \hsrr == EXC_HV_OR_STD + BEGIN_FTR_SECTION + mfspr r12,SPRN_HSRR1 /* and HSRR1 */ + FTR_SECTION_ELSE + mfspr r12,SPRN_SRR1 /* and HSRR1 */ + ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) + .elseif \hsrr + mfspr r12,SPRN_HSRR1 /* and HSRR1 */ + .else + mfspr r12,SPRN_SRR1 /* and HSRR1 */ + .endif + li r10,MSR_RI + mtmsrd r10,1 /* Set RI (EE=0) */ + bctr +#else + .if \hsrr == EXC_HV_OR_STD + BEGIN_FTR_SECTION + mfspr r11,SPRN_HSRR0 /* save HSRR0 */ + mfspr r12,SPRN_HSRR1 /* and HSRR1 */ + FTR_SECTION_ELSE + mfspr r11,SPRN_SRR0 /* save SRR0 */ + mfspr r12,SPRN_SRR1 /* and SRR1 */ + ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) + .elseif \hsrr + mfspr r11,SPRN_HSRR0 /* save HSRR0 */ + mfspr r12,SPRN_HSRR1 /* and HSRR1 */ + .else + mfspr r11,SPRN_SRR0 /* save SRR0 */ + mfspr r12,SPRN_SRR1 /* and SRR1 */ + .endif + li r10,MSR_RI + mtmsrd r10,1 /* Set RI (EE=0) */ + b \label #endif +.endm + +/* + * This is the BOOK3S interrupt entry code macro. + * + * This can result in one of several things happening: + * - Branch to the _common handler, relocated, in virtual mode. + * These are normal interrupts (synchronous and asynchronous) handled by + * the kernel. + * - Branch to KVM, relocated but real mode interrupts remain in real mode. + * These occur when HSTATE_IN_GUEST is set. The interrupt may be caused by + * / intended for host or guest kernel, but KVM must always be involved + * because the machine state is set for guest execution. + * - Branch to the masked handler, unrelocated. + * These occur when maskable asynchronous interrupts are taken with the + * irq_soft_mask set. + * - Branch to an "early" handler in real mode but relocated. + * This is done if early=1. MCE and HMI use these to handle errors in real + * mode. + * - Fall through and continue executing in real, unrelocated mode. + * This is done if early=2. 
+ */ +.macro INT_HANDLER name, vec, ool=0, early=0, virt=0, hsrr=0, area=PACA_EXGEN, ri=1, dar=0, dsisr=0, bitmask=0, kvm=0 + SET_SCRATCH0(r13) /* save r13 */ + GET_PACA(r13) + std r9,\area\()+EX_R9(r13) /* save r9 */ + OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR) + HMT_MEDIUM + std r10,\area\()+EX_R10(r13) /* save r10 - r12 */ + OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR) + .if \ool + .if !\virt + b tramp_real_\name + .pushsection .text + TRAMP_REAL_BEGIN(tramp_real_\name) + .else + b tramp_virt_\name + .pushsection .text + TRAMP_VIRT_BEGIN(tramp_virt_\name) + .endif + .endif -#define EXCEPTION_PROLOG_COMMON_1() \ - std r9,_CCR(r1); /* save CR in stackframe */ \ - std r11,_NIP(r1); /* save SRR0 in stackframe */ \ - std r12,_MSR(r1); /* save SRR1 in stackframe */ \ - std r10,0(r1); /* make stack chain pointer */ \ - std r0,GPR0(r1); /* save r0 in stackframe */ \ - std r10,GPR1(r1); /* save r1 in stackframe */ \ - -/* Save original regs values from save area to stack frame. */ -#define EXCEPTION_PROLOG_COMMON_2(area) \ - ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \ - ld r10,area+EX_R10(r13); \ - std r9,GPR9(r1); \ - std r10,GPR10(r1); \ - ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \ - ld r10,area+EX_R12(r13); \ - ld r11,area+EX_R13(r13); \ - std r9,GPR11(r1); \ - std r10,GPR12(r1); \ - std r11,GPR13(r1); \ -BEGIN_FTR_SECTION_NESTED(66); \ - ld r10,area+EX_CFAR(r13); \ - std r10,ORIG_GPR3(r1); \ -END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \ - GET_CTR(r10, area); \ - std r10,_CTR(r1); - -#define EXCEPTION_PROLOG_COMMON_3(trap) \ - std r2,GPR2(r1); /* save r2 in stackframe */ \ - SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \ - SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \ - mflr r9; /* Get LR, later save to stack */ \ - ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ - std r9,_LINK(r1); \ - lbz r10,PACAIRQSOFTMASK(r13); \ - mfspr r11,SPRN_XER; /* save XER in stackframe */ \ - std r10,SOFTE(r1); \ - std r11,_XER(r1); \ - li r9,(trap)+1; \ - std r9,_TRAP(r1); /* set trap number */ \ - li r10,0; \ - ld r11,exception_marker@toc(r2); \ - std r10,RESULT(r1); /* clear regs->result */ \ - std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ + OPT_SAVE_REG_TO_PACA(\area\()+EX_PPR, r9, CPU_FTR_HAS_PPR) + OPT_SAVE_REG_TO_PACA(\area\()+EX_CFAR, r10, CPU_FTR_CFAR) + INTERRUPT_TO_KERNEL + SAVE_CTR(r10, \area\()) + mfcr r9 + .if \kvm + KVMTEST \name \hsrr \vec + .endif + .if \bitmask + lbz r10,PACAIRQSOFTMASK(r13) + andi. r10,r10,\bitmask + /* Associate vector numbers with bits in paca->irq_happened */ + .if \vec == 0x500 || \vec == 0xea0 + li r10,PACA_IRQ_EE + .elseif \vec == 0x900 + li r10,PACA_IRQ_DEC + .elseif \vec == 0xa00 || \vec == 0xe80 + li r10,PACA_IRQ_DBELL + .elseif \vec == 0xe60 + li r10,PACA_IRQ_HMI + .elseif \vec == 0xf00 + li r10,PACA_IRQ_PMI + .else + .abort "Bad maskable vector" + .endif + + .if \hsrr == EXC_HV_OR_STD + BEGIN_FTR_SECTION + bne masked_Hinterrupt + FTR_SECTION_ELSE + bne masked_interrupt + ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) + .elseif \hsrr + bne masked_Hinterrupt + .else + bne masked_interrupt + .endif + .endif + + std r11,\area\()+EX_R11(r13) + std r12,\area\()+EX_R12(r13) + + /* + * DAR/DSISR, SCRATCH0 must be read before setting MSR[RI], + * because a d-side MCE will clobber those registers so is + * not recoverable if they are live. 
+ */ + GET_SCRATCH0(r10) + std r10,\area\()+EX_R13(r13) + .if \dar + .if \hsrr + mfspr r10,SPRN_HDAR + .else + mfspr r10,SPRN_DAR + .endif + std r10,\area\()+EX_DAR(r13) + .endif + .if \dsisr + .if \hsrr + mfspr r10,SPRN_HDSISR + .else + mfspr r10,SPRN_DSISR + .endif + stw r10,\area\()+EX_DSISR(r13) + .endif + + .if \early == 2 + /* nothing more */ + .elseif \early + mfctr r10 /* save ctr, even for !RELOCATABLE */ + BRANCH_TO_C000(r11, \name\()_early_common) + .elseif !\virt + INT_SAVE_SRR_AND_JUMP \name\()_common, \hsrr, \ri + .else + INT_VIRT_SAVE_SRR_AND_JUMP \name\()_common, \hsrr + .endif + .if \ool + .popsection + .endif +.endm /* * On entry r13 points to the paca, r9-r13 are saved in the paca, * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and * SRR1, and relocation is on. + * + * If stack=0, then the stack is already set in r1, and r1 is saved in r10. + * PPR save and CPU accounting is not done for the !stack case (XXX why not?) */ -#define EXCEPTION_COMMON(area, trap) \ - andi. r10,r12,MSR_PR; /* See if coming from user */ \ - mr r10,r1; /* Save r1 */ \ - subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \ - beq- 1f; \ - ld r1,PACAKSAVE(r13); /* kernel stack to use */ \ -1: tdgei r1,-INT_FRAME_SIZE; /* trap if r1 is in userspace */ \ - EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0; \ -3: EXCEPTION_PROLOG_COMMON_1(); \ - kuap_save_amr_and_lock r9, r10, cr1, cr0; \ - beq 4f; /* if from kernel mode */ \ - ACCOUNT_CPU_USER_ENTRY(r13, r9, r10); \ - SAVE_PPR(area, r9); \ -4: EXCEPTION_PROLOG_COMMON_2(area); \ - EXCEPTION_PROLOG_COMMON_3(trap); \ +.macro INT_COMMON vec, area, stack, kaup, reconcile, dar, dsisr + .if \stack + andi. r10,r12,MSR_PR /* See if coming from user */ + mr r10,r1 /* Save r1 */ + subi r1,r1,INT_FRAME_SIZE /* alloc frame on kernel stack */ + beq- 100f + ld r1,PACAKSAVE(r13) /* kernel stack to use */ +100: tdgei r1,-INT_FRAME_SIZE /* trap if r1 is in userspace */ + EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0 + .endif + + std r9,_CCR(r1) /* save CR in stackframe */ + std r11,_NIP(r1) /* save SRR0 in stackframe */ + std r12,_MSR(r1) /* save SRR1 in stackframe */ + std r10,0(r1) /* make stack chain pointer */ + std r0,GPR0(r1) /* save r0 in stackframe */ + std r10,GPR1(r1) /* save r1 in stackframe */ + + .if \stack + .if \kaup + kuap_save_amr_and_lock r9, r10, cr1, cr0 + .endif + beq 101f /* if from kernel mode */ + ACCOUNT_CPU_USER_ENTRY(r13, r9, r10) + SAVE_PPR(\area, r9) +101: + .else + .if \kaup + kuap_save_amr_and_lock r9, r10, cr1 + .endif + .endif + + /* Save original regs values from save area to stack frame. 
*/ + ld r9,\area+EX_R9(r13) /* move r9, r10 to stackframe */ + ld r10,\area+EX_R10(r13) + std r9,GPR9(r1) + std r10,GPR10(r1) + ld r9,\area+EX_R11(r13) /* move r11 - r13 to stackframe */ + ld r10,\area+EX_R12(r13) + ld r11,\area+EX_R13(r13) + std r9,GPR11(r1) + std r10,GPR12(r1) + std r11,GPR13(r1) + .if \dar + .if \dar == 2 + ld r10,_NIP(r1) + .else + ld r10,\area+EX_DAR(r13) + .endif + std r10,_DAR(r1) + .endif + .if \dsisr + .if \dsisr == 2 + ld r10,_MSR(r1) + lis r11,DSISR_SRR1_MATCH_64S@h + and r10,r10,r11 + .else + lwz r10,\area+EX_DSISR(r13) + .endif + std r10,_DSISR(r1) + .endif +BEGIN_FTR_SECTION_NESTED(66) + ld r10,\area+EX_CFAR(r13) + std r10,ORIG_GPR3(r1) +END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66) + GET_CTR(r10, \area) + std r10,_CTR(r1) + std r2,GPR2(r1) /* save r2 in stackframe */ + SAVE_4GPRS(3, r1) /* save r3 - r6 in stackframe */ + SAVE_2GPRS(7, r1) /* save r7, r8 in stackframe */ + mflr r9 /* Get LR, later save to stack */ + ld r2,PACATOC(r13) /* get kernel TOC into r2 */ + std r9,_LINK(r1) + lbz r10,PACAIRQSOFTMASK(r13) + mfspr r11,SPRN_XER /* save XER in stackframe */ + std r10,SOFTE(r1) + std r11,_XER(r1) + li r9,(\vec)+1 + std r9,_TRAP(r1) /* set trap number */ + li r10,0 + ld r11,exception_marker@toc(r2) + std r10,RESULT(r1) /* clear regs->result */ + std r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame */ + + .if \stack ACCOUNT_STOLEN_TIME + .endif -/* - * Exception where stack is already set in r1, r1 is saved in r10. - * PPR save and CPU accounting is not done (for some reason). - */ -#define EXCEPTION_COMMON_STACK(area, trap) \ - EXCEPTION_PROLOG_COMMON_1(); \ - kuap_save_amr_and_lock r9, r10, cr1; \ - EXCEPTION_PROLOG_COMMON_2(area); \ - EXCEPTION_PROLOG_COMMON_3(trap) + .if \reconcile + RECONCILE_IRQ_STATE(r10, r11) + .endif +.endm /* * Restore all registers including H/SRR0/1 saved in a stack frame of a @@ -428,6 +614,9 @@ END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \ .macro EXCEPTION_RESTORE_REGS hsrr /* Move original SRR0 and SRR1 into the respective regs */ ld r9,_MSR(r1) + .if \hsrr == EXC_HV_OR_STD + .error "EXC_HV_OR_STD Not implemented for EXCEPTION_RESTORE_REGS" + .endif .if \hsrr mtspr SPRN_HSRR1,r9 .else @@ -481,219 +670,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) #define FINISH_NAP #endif -/* - * Following are the BOOK3S exception handler helper macros. - * Handlers come in a number of types, and each type has a number of varieties. - * - * EXC_REAL_* - real, unrelocated exception vectors - * EXC_VIRT_* - virt (AIL), unrelocated exception vectors - * TRAMP_REAL_* - real, unrelocated helpers (virt can call these) - * TRAMP_VIRT_* - virt, unreloc helpers (in practice, real can use) - * TRAMP_KVM - KVM handlers that get put into real, unrelocated - * EXC_COMMON - virt, relocated common handlers - * - * The EXC handlers are given a name, and branch to name_common, or the - * appropriate KVM or masking function. 
Vector handler verieties are as - * follows: - * - * EXC_{REAL|VIRT}_BEGIN/END - used to open-code the exception - * - * EXC_{REAL|VIRT} - standard exception - * - * EXC_{REAL|VIRT}_suffix - * where _suffix is: - * - _MASKABLE - maskable exception - * - _OOL - out of line with trampoline to common handler - * - _HV - HV exception - * - * There can be combinations, e.g., EXC_VIRT_OOL_MASKABLE_HV - * - * KVM handlers come in the following verieties: - * TRAMP_KVM - * TRAMP_KVM_SKIP - * TRAMP_KVM_HV - * TRAMP_KVM_HV_SKIP - * - * COMMON handlers come in the following verieties: - * EXC_COMMON_BEGIN/END - used to open-code the handler - * EXC_COMMON - * EXC_COMMON_ASYNC - * - * TRAMP_REAL and TRAMP_VIRT can be used with BEGIN/END. KVM - * and OOL handlers are implemented as types of TRAMP and TRAMP_VIRT handlers. - */ - -#define __EXC_REAL(name, start, size, area) \ - EXC_REAL_BEGIN(name, start, size); \ - EXCEPTION_PROLOG_0 area ; \ - EXCEPTION_PROLOG_1 EXC_STD, area, 1, start, 0, 0, 0 ; \ - EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1 ; \ - EXC_REAL_END(name, start, size) - -#define EXC_REAL(name, start, size) \ - __EXC_REAL(name, start, size, PACA_EXGEN) - -#define __EXC_VIRT(name, start, size, realvec, area) \ - EXC_VIRT_BEGIN(name, start, size); \ - EXCEPTION_PROLOG_0 area ; \ - EXCEPTION_PROLOG_1 EXC_STD, area, 0, realvec, 0, 0, 0; \ - EXCEPTION_PROLOG_2_VIRT name##_common, EXC_STD ; \ - EXC_VIRT_END(name, start, size) - -#define EXC_VIRT(name, start, size, realvec) \ - __EXC_VIRT(name, start, size, realvec, PACA_EXGEN) - -#define EXC_REAL_MASKABLE(name, start, size, bitmask) \ - EXC_REAL_BEGIN(name, start, size); \ - EXCEPTION_PROLOG_0 PACA_EXGEN ; \ - EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, start, 0, 0, bitmask ; \ - EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1 ; \ - EXC_REAL_END(name, start, size) - -#define EXC_VIRT_MASKABLE(name, start, size, realvec, bitmask) \ - EXC_VIRT_BEGIN(name, start, size); \ - EXCEPTION_PROLOG_0 PACA_EXGEN ; \ - EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, realvec, 0, 0, bitmask ; \ - EXCEPTION_PROLOG_2_VIRT name##_common, EXC_STD ; \ - EXC_VIRT_END(name, start, size) - -#define EXC_REAL_HV(name, start, size) \ - EXC_REAL_BEGIN(name, start, size); \ - EXCEPTION_PROLOG_0 PACA_EXGEN; \ - EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, start, 0, 0, 0 ; \ - EXCEPTION_PROLOG_2_REAL name##_common, EXC_HV, 1 ; \ - EXC_REAL_END(name, start, size) - -#define EXC_VIRT_HV(name, start, size, realvec) \ - EXC_VIRT_BEGIN(name, start, size); \ - EXCEPTION_PROLOG_0 PACA_EXGEN; \ - EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, realvec, 0, 0, 0 ; \ - EXCEPTION_PROLOG_2_VIRT name##_common, EXC_HV ; \ - EXC_VIRT_END(name, start, size) - -#define __EXC_REAL_OOL(name, start, size) \ - EXC_REAL_BEGIN(name, start, size); \ - EXCEPTION_PROLOG_0 PACA_EXGEN ; \ - b tramp_real_##name ; \ - EXC_REAL_END(name, start, size) - -#define __TRAMP_REAL_OOL(name, vec) \ - TRAMP_REAL_BEGIN(tramp_real_##name); \ - EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, vec, 0, 0, 0 ; \ - EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1 - -#define EXC_REAL_OOL(name, start, size) \ - __EXC_REAL_OOL(name, start, size); \ - __TRAMP_REAL_OOL(name, start) - -#define __EXC_REAL_OOL_MASKABLE(name, start, size) \ - __EXC_REAL_OOL(name, start, size) - -#define __TRAMP_REAL_OOL_MASKABLE(name, vec, bitmask) \ - TRAMP_REAL_BEGIN(tramp_real_##name); \ - EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, vec, 0, 0, bitmask ; \ - EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1 - -#define EXC_REAL_OOL_MASKABLE(name, start, size, 
bitmask) \ - __EXC_REAL_OOL_MASKABLE(name, start, size); \ - __TRAMP_REAL_OOL_MASKABLE(name, start, bitmask) - -#define __EXC_REAL_OOL_HV(name, start, size) \ - __EXC_REAL_OOL(name, start, size) - -#define __TRAMP_REAL_OOL_HV(name, vec) \ - TRAMP_REAL_BEGIN(tramp_real_##name); \ - EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, vec, 0, 0, 0 ; \ - EXCEPTION_PROLOG_2_REAL name##_common, EXC_HV, 1 - -#define EXC_REAL_OOL_HV(name, start, size) \ - __EXC_REAL_OOL_HV(name, start, size); \ - __TRAMP_REAL_OOL_HV(name, start) - -#define __EXC_REAL_OOL_MASKABLE_HV(name, start, size) \ - __EXC_REAL_OOL(name, start, size) - -#define __TRAMP_REAL_OOL_MASKABLE_HV(name, vec, bitmask) \ - TRAMP_REAL_BEGIN(tramp_real_##name); \ - EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, vec, 0, 0, bitmask ; \ - EXCEPTION_PROLOG_2_REAL name##_common, EXC_HV, 1 - -#define EXC_REAL_OOL_MASKABLE_HV(name, start, size, bitmask) \ - __EXC_REAL_OOL_MASKABLE_HV(name, start, size); \ - __TRAMP_REAL_OOL_MASKABLE_HV(name, start, bitmask) - -#define __EXC_VIRT_OOL(name, start, size) \ - EXC_VIRT_BEGIN(name, start, size); \ - EXCEPTION_PROLOG_0 PACA_EXGEN ; \ - b tramp_virt_##name; \ - EXC_VIRT_END(name, start, size) - -#define __TRAMP_VIRT_OOL(name, realvec) \ - TRAMP_VIRT_BEGIN(tramp_virt_##name); \ - EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, vec, 0, 0, 0 ; \ - EXCEPTION_PROLOG_2_VIRT name##_common, EXC_STD - -#define EXC_VIRT_OOL(name, start, size, realvec) \ - __EXC_VIRT_OOL(name, start, size); \ - __TRAMP_VIRT_OOL(name, realvec) - -#define __EXC_VIRT_OOL_MASKABLE(name, start, size) \ - __EXC_VIRT_OOL(name, start, size) - -#define __TRAMP_VIRT_OOL_MASKABLE(name, realvec, bitmask) \ - TRAMP_VIRT_BEGIN(tramp_virt_##name); \ - EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, realvec, 0, 0, bitmask ; \ - EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1 - -#define EXC_VIRT_OOL_MASKABLE(name, start, size, realvec, bitmask) \ - __EXC_VIRT_OOL_MASKABLE(name, start, size); \ - __TRAMP_VIRT_OOL_MASKABLE(name, realvec, bitmask) - -#define __EXC_VIRT_OOL_HV(name, start, size) \ - __EXC_VIRT_OOL(name, start, size) - -#define __TRAMP_VIRT_OOL_HV(name, realvec) \ - TRAMP_VIRT_BEGIN(tramp_virt_##name); \ - EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, realvec, 0, 0, 0 ; \ - EXCEPTION_PROLOG_2_VIRT name##_common, EXC_HV - -#define EXC_VIRT_OOL_HV(name, start, size, realvec) \ - __EXC_VIRT_OOL_HV(name, start, size); \ - __TRAMP_VIRT_OOL_HV(name, realvec) - -#define __EXC_VIRT_OOL_MASKABLE_HV(name, start, size) \ - __EXC_VIRT_OOL(name, start, size) - -#define __TRAMP_VIRT_OOL_MASKABLE_HV(name, realvec, bitmask) \ - TRAMP_VIRT_BEGIN(tramp_virt_##name); \ - EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, realvec, 0, 0, bitmask ; \ - EXCEPTION_PROLOG_2_VIRT name##_common, EXC_HV - -#define EXC_VIRT_OOL_MASKABLE_HV(name, start, size, realvec, bitmask) \ - __EXC_VIRT_OOL_MASKABLE_HV(name, start, size); \ - __TRAMP_VIRT_OOL_MASKABLE_HV(name, realvec, bitmask) - -#define TRAMP_KVM(area, n) \ - TRAMP_KVM_BEGIN(do_kvm_##n); \ - KVM_HANDLER area, EXC_STD, n, 0 - -#define TRAMP_KVM_SKIP(area, n) \ - TRAMP_KVM_BEGIN(do_kvm_##n); \ - KVM_HANDLER area, EXC_STD, n, 1 - -#define TRAMP_KVM_HV(area, n) \ - TRAMP_KVM_BEGIN(do_kvm_H##n); \ - KVM_HANDLER area, EXC_HV, n, 0 - -#define TRAMP_KVM_HV_SKIP(area, n) \ - TRAMP_KVM_BEGIN(do_kvm_H##n); \ - KVM_HANDLER area, EXC_HV, n, 1 - #define EXC_COMMON(name, realvec, hdlr) \ EXC_COMMON_BEGIN(name); \ - EXCEPTION_COMMON(PACA_EXGEN, realvec); \ + INT_COMMON realvec, PACA_EXGEN, 1, 1, 1, 0, 0 ; \ bl save_nvgprs; \ - RECONCILE_IRQ_STATE(r10, r11); 
\ addi r3,r1,STACK_FRAME_OVERHEAD; \ bl hdlr; \ b ret_from_except @@ -704,9 +684,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) */ #define EXC_COMMON_ASYNC(name, realvec, hdlr) \ EXC_COMMON_BEGIN(name); \ - EXCEPTION_COMMON(PACA_EXGEN, realvec); \ + INT_COMMON realvec, PACA_EXGEN, 1, 1, 1, 0, 0 ; \ FINISH_NAP; \ - RECONCILE_IRQ_STATE(r10, r11); \ RUNLATCH_ON; \ addi r3,r1,STACK_FRAME_OVERHEAD; \ bl hdlr; \ @@ -836,9 +815,7 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) #endif - EXCEPTION_PROLOG_0 PACA_EXNMI - EXCEPTION_PROLOG_1 EXC_STD, PACA_EXNMI, 1, 0x100, 0, 0, 0 - EXCEPTION_PROLOG_2_REAL system_reset_common, EXC_STD, 0 + INT_HANDLER system_reset, 0x100, area=PACA_EXNMI, ri=0, kvm=1 /* * MSR_RI is not enabled, because PACA_EXNMI and nmi stack is * being used, so a nested NMI exception would corrupt it. @@ -850,9 +827,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) * be dangerous anyway. */ EXC_REAL_END(system_reset, 0x100, 0x100) - EXC_VIRT_NONE(0x4100, 0x100) -TRAMP_KVM(PACA_EXNMI, 0x100) +INT_KVM_HANDLER system_reset 0x100, EXC_STD, PACA_EXNMI, 0 #ifdef CONFIG_PPC_P7_NAP TRAMP_REAL_BEGIN(system_reset_idle_wake) @@ -868,9 +844,7 @@ TRAMP_REAL_BEGIN(system_reset_idle_wake) */ TRAMP_REAL_BEGIN(system_reset_fwnmi) /* See comment at system_reset exception, don't turn on RI */ - EXCEPTION_PROLOG_0 PACA_EXNMI - EXCEPTION_PROLOG_1 EXC_STD, PACA_EXNMI, 0, 0x100, 0, 0, 0 - EXCEPTION_PROLOG_2_REAL system_reset_common, EXC_STD, 0 + INT_HANDLER system_reset, 0x100, area=PACA_EXNMI, ri=0 #endif /* CONFIG_PPC_PSERIES */ @@ -890,7 +864,7 @@ EXC_COMMON_BEGIN(system_reset_common) mr r10,r1 ld r1,PACA_NMI_EMERG_SP(r13) subi r1,r1,INT_FRAME_SIZE - EXCEPTION_COMMON_STACK(PACA_EXNMI, 0x100) + INT_COMMON 0x100, PACA_EXNMI, 0, 1, 0, 0, 0 bl save_nvgprs /* * Set IRQS_ALL_DISABLED unconditionally so arch_irqs_disabled does @@ -929,30 +903,43 @@ EXC_COMMON_BEGIN(system_reset_common) stb r10,PACAIRQSOFTMASK(r13) EXCEPTION_RESTORE_REGS EXC_STD - RFI_TO_USER_OR_KERNEL + RFI_TO_USER_OR_KERNEL + + +EXC_REAL_BEGIN(machine_check, 0x200, 0x100) + INT_HANDLER machine_check, 0x200, early=1, area=PACA_EXMC, dar=1, dsisr=1 + /* + * MSR_RI is not enabled, because PACA_EXMC is being used, so a + * nested machine check corrupts it. machine_check_common enables + * MSR_RI. + */ +EXC_REAL_END(machine_check, 0x200, 0x100) +EXC_VIRT_NONE(0x4200, 0x100) + +#ifdef CONFIG_PPC_PSERIES +TRAMP_REAL_BEGIN(machine_check_fwnmi) + /* See comment at machine_check exception, don't turn on RI */ + INT_HANDLER machine_check, 0x200, early=1, area=PACA_EXMC, dar=1, dsisr=1 +#endif + +INT_KVM_HANDLER machine_check 0x200, EXC_STD, PACA_EXMC, 1 + +#define MACHINE_CHECK_HANDLER_WINDUP \ + /* Clear MSR_RI before setting SRR0 and SRR1. */\ + li r9,0; \ + mtmsrd r9,1; /* Clear MSR_RI */ \ + /* Decrement paca->in_mce now RI is clear. 
*/ \ + lhz r12,PACA_IN_MCE(r13); \ + subi r12,r12,1; \ + sth r12,PACA_IN_MCE(r13); \ + EXCEPTION_RESTORE_REGS EXC_STD +EXC_COMMON_BEGIN(machine_check_early_common) + mtctr r10 /* Restore ctr */ + mfspr r11,SPRN_SRR0 + mfspr r12,SPRN_SRR1 -EXC_REAL_BEGIN(machine_check, 0x200, 0x100) - /* This is moved out of line as it can be patched by FW, but - * some code path might still want to branch into the original - * vector - */ - EXCEPTION_PROLOG_0 PACA_EXMC -BEGIN_FTR_SECTION - b machine_check_common_early -FTR_SECTION_ELSE - b machine_check_pSeries_0 -ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE) -EXC_REAL_END(machine_check, 0x200, 0x100) -EXC_VIRT_NONE(0x4200, 0x100) -TRAMP_REAL_BEGIN(machine_check_common_early) - EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 0, 0x200, 0, 0, 0 /* - * Register contents: - * R13 = PACA - * R9 = CR - * Original R9 to R13 is saved on PACA_EXMC - * * Switch to mc_emergency stack and handle re-entrancy (we limit * the nested MCE upto level 4 to avoid stack overflow). * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1 @@ -973,103 +960,127 @@ TRAMP_REAL_BEGIN(machine_check_common_early) * the machine check is handled then the idle wakeup code is called * to restore state. */ - mr r11,r1 /* Save r1 */ lhz r10,PACA_IN_MCE(r13) cmpwi r10,0 /* Are we in nested machine check */ - bne 0f /* Yes, we are. */ - /* First machine check entry */ - ld r1,PACAMCEMERGSP(r13) /* Use MC emergency stack */ -0: subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */ + cmpwi cr1,r10,MAX_MCE_DEPTH /* Are we at maximum nesting */ addi r10,r10,1 /* increment paca->in_mce */ sth r10,PACA_IN_MCE(r13) - /* Limit nested MCE to level 4 to avoid stack overflow */ - cmpwi r10,MAX_MCE_DEPTH - bgt 2f /* Check if we hit limit of 4 */ - std r11,GPR1(r1) /* Save r1 on the stack. */ - std r11,0(r1) /* make stack chain pointer */ - mfspr r11,SPRN_SRR0 /* Save SRR0 */ - std r11,_NIP(r1) - mfspr r11,SPRN_SRR1 /* Save SRR1 */ - std r11,_MSR(r1) - mfspr r11,SPRN_DAR /* Save DAR */ - std r11,_DAR(r1) - mfspr r11,SPRN_DSISR /* Save DSISR */ - std r11,_DSISR(r1) - std r9,_CCR(r1) /* Save CR in stackframe */ + + mr r10,r1 /* Save r1 */ + bne 1f + /* First machine check entry */ + ld r1,PACAMCEMERGSP(r13) /* Use MC emergency stack */ +1: /* Limit nested MCE to level 4 to avoid stack overflow */ + bgt cr1,unrecoverable_mce /* Check if we hit limit of 4 */ + subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */ + /* We don't touch AMR here, we never go to virtual mode */ - /* Save r9 through r13 from EXMC save area to stack frame. */ - EXCEPTION_PROLOG_COMMON_2(PACA_EXMC) - mfmsr r11 /* get MSR value */ + INT_COMMON 0x200, PACA_EXMC, 0, 0, 0, 1, 1 + BEGIN_FTR_SECTION - ori r11,r11,MSR_ME /* turn on ME bit */ + bl enable_machine_check END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) - ori r11,r11,MSR_RI /* turn on RI bit */ - LOAD_HANDLER(r12, machine_check_handle_early) -1: mtspr SPRN_SRR0,r12 - mtspr SPRN_SRR1,r11 - RFI_TO_KERNEL - b . /* prevent speculative execution */ -2: - /* Stack overflow. Stay on emergency stack and panic. - * Keep the ME bit off while panic-ing, so that if we hit - * another machine check we checkstop. - */ - addi r1,r1,INT_FRAME_SIZE /* go back to previous stack frame */ - ld r11,PACAKMSR(r13) - LOAD_HANDLER(r12, unrecover_mce) - li r10,MSR_ME - andc r11,r11,r10 /* Turn off MSR_ME */ - b 1b - b . 
/* prevent speculative execution */ + li r10,MSR_RI + mtmsrd r10,1 + + bl save_nvgprs + addi r3,r1,STACK_FRAME_OVERHEAD + bl machine_check_early + std r3,RESULT(r1) /* Save result */ + ld r12,_MSR(r1) -TRAMP_REAL_BEGIN(machine_check_pSeries) - .globl machine_check_fwnmi -machine_check_fwnmi: - EXCEPTION_PROLOG_0 PACA_EXMC +#ifdef CONFIG_PPC_P7_NAP + /* + * Check if thread was in power saving mode. We come here when any + * of the following is true: + * a. thread wasn't in power saving mode + * b. thread was in power saving mode with no state loss, + * supervisor state loss or hypervisor state loss. + * + * Go back to nap/sleep/winkle mode again if (b) is true. + */ BEGIN_FTR_SECTION - b machine_check_common_early -END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE) -machine_check_pSeries_0: - EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 1, 0x200, 1, 1, 0 + rlwinm. r11,r12,47-31,30,31 + bne machine_check_idle_common +END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) +#endif + +#ifdef CONFIG_KVM_BOOK3S_64_HANDLER /* - * MSR_RI is not enabled, because PACA_EXMC is being used, so a - * nested machine check corrupts it. machine_check_common enables - * MSR_RI. + * Check if we are coming from guest. If yes, then run the normal + * exception handler which will take the + * machine_check_kvm->kvmppc_interrupt branch to deliver the MC event + * to guest. */ - EXCEPTION_PROLOG_2_REAL machine_check_common, EXC_STD, 0 + lbz r11,HSTATE_IN_GUEST(r13) + cmpwi r11,0 /* Check if coming from guest */ + bne mce_deliver /* continue if we are. */ +#endif + + /* + * Check if we are coming from userspace. If yes, then run the normal + * exception handler which will deliver the MC event to this kernel. + */ + andi. r11,r12,MSR_PR /* See if coming from user. */ + bne mce_deliver /* continue in V mode if we are. */ + + /* + * At this point we are coming from kernel context. + * Queue up the MCE event and return from the interrupt. + * But before that, check if this is an un-recoverable exception. + * If yes, then stay on emergency stack and panic. + */ + andi. r11,r12,MSR_RI + beq unrecoverable_mce + + /* + * Check if we have successfully handled/recovered from error, if not + * then stay on emergency stack and panic. + */ + ld r3,RESULT(r1) /* Load result */ + cmpdi r3,0 /* see if we handled MCE successfully */ + beq unrecoverable_mce /* if !handled then panic */ + + /* + * Return from MC interrupt. + * Queue up the MCE event so that we can log it later, while + * returning from kernel or opal call. + */ + bl machine_check_queue_event + MACHINE_CHECK_HANDLER_WINDUP + RFI_TO_KERNEL -TRAMP_KVM_SKIP(PACA_EXMC, 0x200) +mce_deliver: + /* + * This is a host user or guest MCE. Restore all registers, then + * run the "late" handler. For host user, this will run the + * machine_check_exception handler in virtual mode like a normal + * interrupt handler. For guest, this will trigger the KVM test + * and branch to the KVM interrupt similarly to other interrupts. + */ +BEGIN_FTR_SECTION + ld r10,ORIG_GPR3(r1) + mtspr SPRN_CFAR,r10 +END_FTR_SECTION_IFSET(CPU_FTR_CFAR) + MACHINE_CHECK_HANDLER_WINDUP + /* See comment at machine_check exception, don't turn on RI */ + INT_HANDLER machine_check, 0x200, area=PACA_EXMC, ri=0, dar=1, dsisr=1, kvm=1 EXC_COMMON_BEGIN(machine_check_common) /* * Machine check is different because we use a different * save area: PACA_EXMC instead of PACA_EXGEN. 
*/ - EXCEPTION_COMMON(PACA_EXMC, 0x200) + INT_COMMON 0x200, PACA_EXMC, 1, 1, 1, 1, 1 FINISH_NAP - RECONCILE_IRQ_STATE(r10, r11) - ld r3,PACA_EXMC+EX_DAR(r13) - lwz r4,PACA_EXMC+EX_DSISR(r13) /* Enable MSR_RI when finished with PACA_EXMC */ li r10,MSR_RI mtmsrd r10,1 - std r3,_DAR(r1) - std r4,_DSISR(r1) bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD bl machine_check_exception b ret_from_except -#define MACHINE_CHECK_HANDLER_WINDUP \ - /* Clear MSR_RI before setting SRR0 and SRR1. */\ - li r9,0; \ - mtmsrd r9,1; /* Clear MSR_RI */ \ - /* Decrement paca->in_mce now RI is clear. */ \ - lhz r12,PACA_IN_MCE(r13); \ - subi r12,r12,1; \ - sth r12,PACA_IN_MCE(r13); \ - EXCEPTION_RESTORE_REGS EXC_STD - #ifdef CONFIG_PPC_P7_NAP /* * This is an idle wakeup. Low level machine check has already been @@ -1101,72 +1112,8 @@ EXC_COMMON_BEGIN(machine_check_idle_common) bltlr cr1 /* no state loss, return to idle caller */ b idle_return_gpr_loss #endif - /* - * Handle machine check early in real mode. We come here with - * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack. - */ -EXC_COMMON_BEGIN(machine_check_handle_early) - std r0,GPR0(r1) /* Save r0 */ - EXCEPTION_PROLOG_COMMON_3(0x200) - bl save_nvgprs - addi r3,r1,STACK_FRAME_OVERHEAD - bl machine_check_early - std r3,RESULT(r1) /* Save result */ - ld r12,_MSR(r1) -BEGIN_FTR_SECTION - b 4f -END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE) - -#ifdef CONFIG_PPC_P7_NAP - /* - * Check if thread was in power saving mode. We come here when any - * of the following is true: - * a. thread wasn't in power saving mode - * b. thread was in power saving mode with no state loss, - * supervisor state loss or hypervisor state loss. - * - * Go back to nap/sleep/winkle mode again if (b) is true. - */ -BEGIN_FTR_SECTION - rlwinm. r11,r12,47-31,30,31 - bne machine_check_idle_common -END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) -#endif - - /* - * Check if we are coming from hypervisor userspace. If yes then we - * continue in host kernel in V mode to deliver the MC event. - */ - rldicl. r11,r12,4,63 /* See if MC hit while in HV mode. */ - beq 5f -4: andi. r11,r12,MSR_PR /* See if coming from user. */ - bne 9f /* continue in V mode if we are. */ -5: -#ifdef CONFIG_KVM_BOOK3S_64_HANDLER -BEGIN_FTR_SECTION - /* - * We are coming from kernel context. Check if we are coming from - * guest. if yes, then we can continue. We will fall through - * do_kvm_200->kvmppc_interrupt to deliver the MC event to guest. - */ - lbz r11,HSTATE_IN_GUEST(r13) - cmpwi r11,0 /* Check if coming from guest */ - bne 9f /* continue if we are. */ -END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) -#endif - /* - * At this point we are not sure about what context we come from. - * Queue up the MCE event and return from the interrupt. - * But before that, check if this is an un-recoverable exception. - * If yes, then stay on emergency stack and panic. - */ - andi. r11,r12,MSR_RI - bne 2f -1: mfspr r11,SPRN_SRR0 - LOAD_HANDLER(r10,unrecover_mce) - mtspr SPRN_SRR0,r10 - ld r10,PACAKMSR(r13) +EXC_COMMON_BEGIN(unrecoverable_mce) /* * We are going down. But there are chances that we might get hit by * another MCE during panic path and we may run into unstable state @@ -1174,84 +1121,36 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) * when another MCE is hit during panic path, system will checkstop * and hypervisor will get restarted cleanly by SP. */ - li r3,MSR_ME - andc r10,r10,r3 /* Turn off MSR_ME */ - mtspr SPRN_SRR1,r10 - RFI_TO_KERNEL - b . 
-2: - /* - * Check if we have successfully handled/recovered from error, if not - * then stay on emergency stack and panic. - */ - ld r3,RESULT(r1) /* Load result */ - cmpdi r3,0 /* see if we handled MCE successfully */ - - beq 1b /* if !handled then panic */ BEGIN_FTR_SECTION - /* - * Return from MC interrupt. - * Queue up the MCE event so that we can log it later, while - * returning from kernel or opal call. - */ - bl machine_check_queue_event - MACHINE_CHECK_HANDLER_WINDUP - RFI_TO_USER_OR_KERNEL -FTR_SECTION_ELSE - /* - * pSeries: Return from MC interrupt. Before that stay on emergency - * stack and call machine_check_exception to log the MCE event. - */ - LOAD_HANDLER(r10,mce_return) - mtspr SPRN_SRR0,r10 + li r10,0 /* clear MSR_RI */ + mtmsrd r10,1 + bl disable_machine_check +END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) ld r10,PACAKMSR(r13) - mtspr SPRN_SRR1,r10 - RFI_TO_KERNEL - b . -ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE) -9: - /* Deliver the machine check to host kernel in V mode. */ - MACHINE_CHECK_HANDLER_WINDUP - EXCEPTION_PROLOG_0 PACA_EXMC - b machine_check_pSeries_0 + li r3,MSR_ME + andc r10,r10,r3 + mtmsrd r10 -EXC_COMMON_BEGIN(unrecover_mce) /* Invoke machine_check_exception to print MCE event and panic. */ addi r3,r1,STACK_FRAME_OVERHEAD bl machine_check_exception + /* - * We will not reach here. Even if we did, there is no way out. Call - * unrecoverable_exception and die. + * We will not reach here. Even if we did, there is no way out. + * Call unrecoverable_exception and die. */ -1: addi r3,r1,STACK_FRAME_OVERHEAD - bl unrecoverable_exception - b 1b - -EXC_COMMON_BEGIN(mce_return) - /* Invoke machine_check_exception to print MCE event and return. */ addi r3,r1,STACK_FRAME_OVERHEAD - bl machine_check_exception - MACHINE_CHECK_HANDLER_WINDUP - RFI_TO_KERNEL + bl unrecoverable_exception b . + EXC_REAL_BEGIN(data_access, 0x300, 0x80) - EXCEPTION_PROLOG_0 PACA_EXGEN - b tramp_real_data_access + INT_HANDLER data_access, 0x300, ool=1, dar=1, dsisr=1, kvm=1 EXC_REAL_END(data_access, 0x300, 0x80) - -TRAMP_REAL_BEGIN(tramp_real_data_access) - EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x300, 1, 1, 0 - EXCEPTION_PROLOG_2_REAL data_access_common, EXC_STD, 1 - EXC_VIRT_BEGIN(data_access, 0x4300, 0x80) - EXCEPTION_PROLOG_0 PACA_EXGEN - EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, 0x300, 1, 1, 0 -EXCEPTION_PROLOG_2_VIRT data_access_common, EXC_STD + INT_HANDLER data_access, 0x300, virt=1, dar=1, dsisr=1 EXC_VIRT_END(data_access, 0x4300, 0x80) - -TRAMP_KVM_SKIP(PACA_EXGEN, 0x300) - +INT_KVM_HANDLER data_access, 0x300, EXC_STD, PACA_EXGEN, 1 EXC_COMMON_BEGIN(data_access_common) /* * Here r13 points to the paca, r9 contains the saved CR, @@ -1259,15 +1158,12 @@ EXC_COMMON_BEGIN(data_access_common) * r9 - r13 are saved in paca->exgen. 
* EX_DAR and EX_DSISR have saved DAR/DSISR */ - EXCEPTION_COMMON(PACA_EXGEN, 0x300) - RECONCILE_IRQ_STATE(r10, r11) - ld r12,_MSR(r1) - ld r3,PACA_EXGEN+EX_DAR(r13) - lwz r4,PACA_EXGEN+EX_DSISR(r13) - li r5,0x300 - std r3,_DAR(r1) - std r4,_DSISR(r1) + INT_COMMON 0x300, PACA_EXGEN, 1, 1, 1, 1, 1 + ld r4,_DAR(r1) + ld r5,_DSISR(r1) BEGIN_MMU_FTR_SECTION + ld r6,_MSR(r1) + li r3,0x300 b do_hash_page /* Try to handle as hpte fault */ MMU_FTR_SECTION_ELSE b handle_page_fault @@ -1275,26 +1171,15 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80) - EXCEPTION_PROLOG_0 PACA_EXSLB - b tramp_real_data_access_slb + INT_HANDLER data_access_slb, 0x380, ool=1, area=PACA_EXSLB, dar=1, kvm=1 EXC_REAL_END(data_access_slb, 0x380, 0x80) - -TRAMP_REAL_BEGIN(tramp_real_data_access_slb) - EXCEPTION_PROLOG_1 EXC_STD, PACA_EXSLB, 1, 0x380, 1, 0, 0 - EXCEPTION_PROLOG_2_REAL data_access_slb_common, EXC_STD, 1 - EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80) - EXCEPTION_PROLOG_0 PACA_EXSLB - EXCEPTION_PROLOG_1 EXC_STD, PACA_EXSLB, 0, 0x380, 1, 0, 0 - EXCEPTION_PROLOG_2_VIRT data_access_slb_common, EXC_STD + INT_HANDLER data_access_slb, 0x380, virt=1, area=PACA_EXSLB, dar=1 EXC_VIRT_END(data_access_slb, 0x4380, 0x80) - -TRAMP_KVM_SKIP(PACA_EXSLB, 0x380) - +INT_KVM_HANDLER data_access_slb, 0x380, EXC_STD, PACA_EXSLB, 1 EXC_COMMON_BEGIN(data_access_slb_common) - EXCEPTION_COMMON(PACA_EXSLB, 0x380) - ld r4,PACA_EXSLB+EX_DAR(r13) - std r4,_DAR(r1) + INT_COMMON 0x380, PACA_EXSLB, 1, 1, 0, 1, 0 + ld r4,_DAR(r1) addi r3,r1,STACK_FRAME_OVERHEAD BEGIN_MMU_FTR_SECTION /* HPT case, do SLB fault */ @@ -1317,33 +1202,36 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) b ret_from_except -EXC_REAL(instruction_access, 0x400, 0x80) -EXC_VIRT(instruction_access, 0x4400, 0x80, 0x400) -TRAMP_KVM(PACA_EXGEN, 0x400) - +EXC_REAL_BEGIN(instruction_access, 0x400, 0x80) + INT_HANDLER instruction_access, 0x400, kvm=1 +EXC_REAL_END(instruction_access, 0x400, 0x80) +EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80) + INT_HANDLER instruction_access, 0x400, virt=1 +EXC_VIRT_END(instruction_access, 0x4400, 0x80) +INT_KVM_HANDLER instruction_access, 0x400, EXC_STD, PACA_EXGEN, 0 EXC_COMMON_BEGIN(instruction_access_common) - EXCEPTION_COMMON(PACA_EXGEN, 0x400) - RECONCILE_IRQ_STATE(r10, r11) - ld r12,_MSR(r1) - ld r3,_NIP(r1) - andis. 
r4,r12,DSISR_SRR1_MATCH_64S@h - li r5,0x400 - std r3,_DAR(r1) - std r4,_DSISR(r1) + INT_COMMON 0x400, PACA_EXGEN, 1, 1, 1, 2, 2 + ld r4,_DAR(r1) + ld r5,_DSISR(r1) BEGIN_MMU_FTR_SECTION + ld r6,_MSR(r1) + li r3,0x400 b do_hash_page /* Try to handle as hpte fault */ MMU_FTR_SECTION_ELSE b handle_page_fault ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) -__EXC_REAL(instruction_access_slb, 0x480, 0x80, PACA_EXSLB) -__EXC_VIRT(instruction_access_slb, 0x4480, 0x80, 0x480, PACA_EXSLB) -TRAMP_KVM(PACA_EXSLB, 0x480) - +EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80) + INT_HANDLER instruction_access_slb, 0x480, area=PACA_EXSLB, kvm=1 +EXC_REAL_END(instruction_access_slb, 0x480, 0x80) +EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80) + INT_HANDLER instruction_access_slb, 0x480, virt=1, area=PACA_EXSLB +EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80) +INT_KVM_HANDLER instruction_access_slb, 0x480, EXC_STD, PACA_EXSLB, 0 EXC_COMMON_BEGIN(instruction_access_slb_common) - EXCEPTION_COMMON(PACA_EXSLB, 0x480) - ld r4,_NIP(r1) + INT_COMMON 0x480, PACA_EXSLB, 1, 1, 0, 2, 0 + ld r4,_DAR(r1) addi r3,r1,STACK_FRAME_OVERHEAD BEGIN_MMU_FTR_SECTION /* HPT case, do SLB fault */ @@ -1359,69 +1247,44 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) std r3,RESULT(r1) bl save_nvgprs RECONCILE_IRQ_STATE(r10, r11) - ld r4,_NIP(r1) + ld r4,_DAR(r1) ld r5,RESULT(r1) addi r3,r1,STACK_FRAME_OVERHEAD bl do_bad_slb_fault b ret_from_except - EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100) - EXCEPTION_PROLOG_0 PACA_EXGEN -BEGIN_FTR_SECTION - EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, 0x500, 0, 0, IRQS_DISABLED - EXCEPTION_PROLOG_2_REAL hardware_interrupt_common, EXC_HV, 1 -FTR_SECTION_ELSE - EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x500, 0, 0, IRQS_DISABLED - EXCEPTION_PROLOG_2_REAL hardware_interrupt_common, EXC_STD, 1 -ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) + INT_HANDLER hardware_interrupt, 0x500, hsrr=EXC_HV_OR_STD, bitmask=IRQS_DISABLED, kvm=1 EXC_REAL_END(hardware_interrupt, 0x500, 0x100) - EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100) - EXCEPTION_PROLOG_0 PACA_EXGEN -BEGIN_FTR_SECTION - EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, 0x500, 0, 0, IRQS_DISABLED - EXCEPTION_PROLOG_2_VIRT hardware_interrupt_common, EXC_HV -FTR_SECTION_ELSE - EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x500, 0, 0, IRQS_DISABLED - EXCEPTION_PROLOG_2_VIRT hardware_interrupt_common, EXC_STD -ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE) + INT_HANDLER hardware_interrupt, 0x500, virt=1, hsrr=EXC_HV_OR_STD, bitmask=IRQS_DISABLED, kvm=1 EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100) - -TRAMP_KVM(PACA_EXGEN, 0x500) -TRAMP_KVM_HV(PACA_EXGEN, 0x500) +INT_KVM_HANDLER hardware_interrupt, 0x500, EXC_HV_OR_STD, PACA_EXGEN, 0 EXC_COMMON_ASYNC(hardware_interrupt_common, 0x500, do_IRQ) EXC_REAL_BEGIN(alignment, 0x600, 0x100) - EXCEPTION_PROLOG_0 PACA_EXGEN - EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x600, 1, 1, 0 - EXCEPTION_PROLOG_2_REAL alignment_common, EXC_STD, 1 + INT_HANDLER alignment, 0x600, dar=1, dsisr=1, kvm=1 EXC_REAL_END(alignment, 0x600, 0x100) - EXC_VIRT_BEGIN(alignment, 0x4600, 0x100) - EXCEPTION_PROLOG_0 PACA_EXGEN - EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, 0x600, 1, 1, 0 - EXCEPTION_PROLOG_2_VIRT alignment_common, EXC_STD + INT_HANDLER alignment, 0x600, virt=1, dar=1, dsisr=1 EXC_VIRT_END(alignment, 0x4600, 0x100) - -TRAMP_KVM(PACA_EXGEN, 0x600) +INT_KVM_HANDLER alignment, 0x600, EXC_STD, PACA_EXGEN, 0 EXC_COMMON_BEGIN(alignment_common) - EXCEPTION_COMMON(PACA_EXGEN, 0x600) - ld 
r3,PACA_EXGEN+EX_DAR(r13) - lwz r4,PACA_EXGEN+EX_DSISR(r13) - std r3,_DAR(r1) - std r4,_DSISR(r1) + INT_COMMON 0x600, PACA_EXGEN, 1, 1, 1, 1, 1 bl save_nvgprs - RECONCILE_IRQ_STATE(r10, r11) addi r3,r1,STACK_FRAME_OVERHEAD bl alignment_exception b ret_from_except -EXC_REAL(program_check, 0x700, 0x100) -EXC_VIRT(program_check, 0x4700, 0x100, 0x700) -TRAMP_KVM(PACA_EXGEN, 0x700) +EXC_REAL_BEGIN(program_check, 0x700, 0x100) + INT_HANDLER program_check, 0x700, kvm=1 +EXC_REAL_END(program_check, 0x700, 0x100) +EXC_VIRT_BEGIN(program_check, 0x4700, 0x100) + INT_HANDLER program_check, 0x700, virt=1 +EXC_VIRT_END(program_check, 0x4700, 0x100) +INT_KVM_HANDLER program_check, 0x700, EXC_STD, PACA_EXGEN, 0 EXC_COMMON_BEGIN(program_check_common) /* * It's possible to receive a TM Bad Thing type program check with @@ -1447,27 +1310,33 @@ EXC_COMMON_BEGIN(program_check_common) mr r10,r1 /* Save r1 */ ld r1,PACAEMERGSP(r13) /* Use emergency stack */ subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */ - b 3f /* Jump into the macro !! */ + INT_COMMON 0x700, PACA_EXGEN, 0, 1, 1, 0, 0 + b 3f 2: - EXCEPTION_COMMON(PACA_EXGEN, 0x700) + INT_COMMON 0x700, PACA_EXGEN, 1, 1, 1, 0, 0 +3: bl save_nvgprs - RECONCILE_IRQ_STATE(r10, r11) addi r3,r1,STACK_FRAME_OVERHEAD bl program_check_exception b ret_from_except -EXC_REAL(fp_unavailable, 0x800, 0x100) -EXC_VIRT(fp_unavailable, 0x4800, 0x100, 0x800) -TRAMP_KVM(PACA_EXGEN, 0x800) +EXC_REAL_BEGIN(fp_unavailable, 0x800, 0x100) + INT_HANDLER fp_unavailable, 0x800, kvm=1 +EXC_REAL_END(fp_unavailable, 0x800, 0x100) +EXC_VIRT_BEGIN(fp_unavailable, 0x4800, 0x100) + INT_HANDLER fp_unavailable, 0x800, virt=1 +EXC_VIRT_END(fp_unavailable, 0x4800, 0x100) +INT_KVM_HANDLER fp_unavailable, 0x800, EXC_STD, PACA_EXGEN, 0 EXC_COMMON_BEGIN(fp_unavailable_common) - EXCEPTION_COMMON(PACA_EXGEN, 0x800) + INT_COMMON 0x800, PACA_EXGEN, 1, 1, 0, 0, 0 bne 1f /* if from user, just load it up */ bl save_nvgprs RECONCILE_IRQ_STATE(r10, r11) addi r3,r1,STACK_FRAME_OVERHEAD bl kernel_fp_unavailable_exception - BUG_OPCODE +0: trap + EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0 1: #ifdef CONFIG_PPC_TRANSACTIONAL_MEM BEGIN_FTR_SECTION @@ -1490,21 +1359,33 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM) #endif -EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED) -EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900, IRQS_DISABLED) -TRAMP_KVM(PACA_EXGEN, 0x900) +EXC_REAL_BEGIN(decrementer, 0x900, 0x80) + INT_HANDLER decrementer, 0x900, ool=1, bitmask=IRQS_DISABLED, kvm=1 +EXC_REAL_END(decrementer, 0x900, 0x80) +EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80) + INT_HANDLER decrementer, 0x900, virt=1, bitmask=IRQS_DISABLED +EXC_VIRT_END(decrementer, 0x4900, 0x80) +INT_KVM_HANDLER decrementer, 0x900, EXC_STD, PACA_EXGEN, 0 EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt) -EXC_REAL_HV(hdecrementer, 0x980, 0x80) -EXC_VIRT_HV(hdecrementer, 0x4980, 0x80, 0x980) -TRAMP_KVM_HV(PACA_EXGEN, 0x980) +EXC_REAL_BEGIN(hdecrementer, 0x980, 0x80) + INT_HANDLER hdecrementer, 0x980, hsrr=EXC_HV, kvm=1 +EXC_REAL_END(hdecrementer, 0x980, 0x80) +EXC_VIRT_BEGIN(hdecrementer, 0x4980, 0x80) + INT_HANDLER hdecrementer, 0x980, virt=1, hsrr=EXC_HV, kvm=1 +EXC_VIRT_END(hdecrementer, 0x4980, 0x80) +INT_KVM_HANDLER hdecrementer, 0x980, EXC_HV, PACA_EXGEN, 0 EXC_COMMON(hdecrementer_common, 0x980, hdec_interrupt) -EXC_REAL_MASKABLE(doorbell_super, 0xa00, 0x100, IRQS_DISABLED) -EXC_VIRT_MASKABLE(doorbell_super, 0x4a00, 0x100, 0xa00, IRQS_DISABLED) -TRAMP_KVM(PACA_EXGEN, 0xa00) +EXC_REAL_BEGIN(doorbell_super, 0xa00, 0x100) + INT_HANDLER 
doorbell_super, 0xa00, bitmask=IRQS_DISABLED, kvm=1 +EXC_REAL_END(doorbell_super, 0xa00, 0x100) +EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100) + INT_HANDLER doorbell_super, 0xa00, virt=1, bitmask=IRQS_DISABLED +EXC_VIRT_END(doorbell_super, 0x4a00, 0x100) +INT_KVM_HANDLER doorbell_super, 0xa00, EXC_STD, PACA_EXGEN, 0 #ifdef CONFIG_PPC_DOORBELL EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, doorbell_exception) #else @@ -1512,17 +1393,13 @@ EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, unknown_exception) #endif -EXC_REAL(trap_0b, 0xb00, 0x100) -EXC_VIRT(trap_0b, 0x4b00, 0x100, 0xb00) -TRAMP_KVM(PACA_EXGEN, 0xb00) -EXC_COMMON(trap_0b_common, 0xb00, unknown_exception) +EXC_REAL_NONE(0xb00, 0x100) +EXC_VIRT_NONE(0x4b00, 0x100) /* * system call / hypercall (0xc00, 0x4c00) * * The system call exception is invoked with "sc 0" and does not alter HV bit. - * There is support for kernel code to invoke system calls but there are no - * in-tree users. * * The hypercall is invoked with "sc 1" and sets HV=1. * @@ -1567,7 +1444,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception) GET_PACA(r13) std r10,PACA_EXGEN+EX_R10(r13) INTERRUPT_TO_KERNEL - KVMTEST EXC_STD 0xc00 /* uses r10, branch to do_kvm_0xc00_system_call */ + KVMTEST system_call EXC_STD 0xc00 /* uses r10, branch to system_call_kvm */ mfctr r9 #else mr r9,r13 @@ -1621,7 +1498,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) EXC_REAL_BEGIN(system_call, 0xc00, 0x100) SYSTEM_CALL 0 EXC_REAL_END(system_call, 0xc00, 0x100) - EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100) SYSTEM_CALL 1 EXC_VIRT_END(system_call, 0x4c00, 0x100) @@ -1634,7 +1510,7 @@ EXC_VIRT_END(system_call, 0x4c00, 0x100) * ctr = orig r13 * orig r10 saved in PACA */ -TRAMP_KVM_BEGIN(do_kvm_0xc00) +TRAMP_KVM_BEGIN(system_call_kvm) /* * Save the PPR (on systems that support it) before changing to * HMT_MEDIUM. 
That allows the KVM code to save that value into the @@ -1647,32 +1523,33 @@ TRAMP_KVM_BEGIN(do_kvm_0xc00) SET_SCRATCH0(r10) std r9,PACA_EXGEN+EX_R9(r13) mfcr r9 - KVM_HANDLER PACA_EXGEN, EXC_STD, 0xc00, 0 + KVM_HANDLER 0xc00, EXC_STD, PACA_EXGEN, 0 #endif -EXC_REAL(single_step, 0xd00, 0x100) -EXC_VIRT(single_step, 0x4d00, 0x100, 0xd00) -TRAMP_KVM(PACA_EXGEN, 0xd00) +EXC_REAL_BEGIN(single_step, 0xd00, 0x100) + INT_HANDLER single_step, 0xd00, kvm=1 +EXC_REAL_END(single_step, 0xd00, 0x100) +EXC_VIRT_BEGIN(single_step, 0x4d00, 0x100) + INT_HANDLER single_step, 0xd00, virt=1 +EXC_VIRT_END(single_step, 0x4d00, 0x100) +INT_KVM_HANDLER single_step, 0xd00, EXC_STD, PACA_EXGEN, 0 EXC_COMMON(single_step_common, 0xd00, single_step_exception) -EXC_REAL_OOL_HV(h_data_storage, 0xe00, 0x20) -EXC_VIRT_OOL_HV(h_data_storage, 0x4e00, 0x20, 0xe00) -TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0xe00) + +EXC_REAL_BEGIN(h_data_storage, 0xe00, 0x20) + INT_HANDLER h_data_storage, 0xe00, ool=1, hsrr=EXC_HV, dar=1, dsisr=1, kvm=1 +EXC_REAL_END(h_data_storage, 0xe00, 0x20) +EXC_VIRT_BEGIN(h_data_storage, 0x4e00, 0x20) + INT_HANDLER h_data_storage, 0xe00, ool=1, virt=1, hsrr=EXC_HV, dar=1, dsisr=1, kvm=1 +EXC_VIRT_END(h_data_storage, 0x4e00, 0x20) +INT_KVM_HANDLER h_data_storage, 0xe00, EXC_HV, PACA_EXGEN, 1 EXC_COMMON_BEGIN(h_data_storage_common) - mfspr r10,SPRN_HDAR - std r10,PACA_EXGEN+EX_DAR(r13) - mfspr r10,SPRN_HDSISR - stw r10,PACA_EXGEN+EX_DSISR(r13) - EXCEPTION_COMMON(PACA_EXGEN, 0xe00) + INT_COMMON 0xe00, PACA_EXGEN, 1, 1, 1, 1, 1 bl save_nvgprs - RECONCILE_IRQ_STATE(r10, r11) addi r3,r1,STACK_FRAME_OVERHEAD BEGIN_MMU_FTR_SECTION - ld r4,PACA_EXGEN+EX_DAR(r13) - lwz r5,PACA_EXGEN+EX_DSISR(r13) - std r4,_DAR(r1) - std r5,_DSISR(r1) + ld r4,_DAR(r1) li r5,SIGSEGV bl bad_page_fault MMU_FTR_SECTION_ELSE @@ -1681,15 +1558,23 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX) b ret_from_except -EXC_REAL_OOL_HV(h_instr_storage, 0xe20, 0x20) -EXC_VIRT_OOL_HV(h_instr_storage, 0x4e20, 0x20, 0xe20) -TRAMP_KVM_HV(PACA_EXGEN, 0xe20) +EXC_REAL_BEGIN(h_instr_storage, 0xe20, 0x20) + INT_HANDLER h_instr_storage, 0xe20, ool=1, hsrr=EXC_HV, kvm=1 +EXC_REAL_END(h_instr_storage, 0xe20, 0x20) +EXC_VIRT_BEGIN(h_instr_storage, 0x4e20, 0x20) + INT_HANDLER h_instr_storage, 0xe20, ool=1, virt=1, hsrr=EXC_HV, kvm=1 +EXC_VIRT_END(h_instr_storage, 0x4e20, 0x20) +INT_KVM_HANDLER h_instr_storage, 0xe20, EXC_HV, PACA_EXGEN, 0 EXC_COMMON(h_instr_storage_common, 0xe20, unknown_exception) -EXC_REAL_OOL_HV(emulation_assist, 0xe40, 0x20) -EXC_VIRT_OOL_HV(emulation_assist, 0x4e40, 0x20, 0xe40) -TRAMP_KVM_HV(PACA_EXGEN, 0xe40) +EXC_REAL_BEGIN(emulation_assist, 0xe40, 0x20) + INT_HANDLER emulation_assist, 0xe40, ool=1, hsrr=EXC_HV, kvm=1 +EXC_REAL_END(emulation_assist, 0xe40, 0x20) +EXC_VIRT_BEGIN(emulation_assist, 0x4e40, 0x20) + INT_HANDLER emulation_assist, 0xe40, ool=1, virt=1, hsrr=EXC_HV, kvm=1 +EXC_VIRT_END(emulation_assist, 0x4e40, 0x20) +INT_KVM_HANDLER emulation_assist, 0xe40, EXC_HV, PACA_EXGEN, 0 EXC_COMMON(emulation_assist_common, 0xe40, emulation_assist_interrupt) @@ -1699,16 +1584,10 @@ EXC_COMMON(emulation_assist_common, 0xe40, emulation_assist_interrupt) * mode. 
*/ EXC_REAL_BEGIN(hmi_exception, 0xe60, 0x20) - EXCEPTION_PROLOG_0 PACA_EXGEN - b hmi_exception_early + INT_HANDLER hmi_exception, 0xe60, ool=1, early=1, hsrr=EXC_HV, ri=0, kvm=1 EXC_REAL_END(hmi_exception, 0xe60, 0x20) EXC_VIRT_NONE(0x4e60, 0x20) -TRAMP_KVM_HV(PACA_EXGEN, 0xe60) -TRAMP_REAL_BEGIN(hmi_exception_early) - EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, 0xe60, 0, 0, 0 - mfctr r10 /* save ctr, even for !RELOCATABLE */ - BRANCH_TO_C000(r11, hmi_exception_early_common) - +INT_KVM_HANDLER hmi_exception, 0xe60, EXC_HV, PACA_EXGEN, 0 EXC_COMMON_BEGIN(hmi_exception_early_common) mtctr r10 /* Restore ctr */ mfspr r11,SPRN_HSRR0 /* Save HSRR0 */ @@ -1716,10 +1595,10 @@ EXC_COMMON_BEGIN(hmi_exception_early_common) mr r10,r1 /* Save r1 */ ld r1,PACAEMERGSP(r13) /* Use emergency stack for realmode */ subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */ - EXCEPTION_PROLOG_COMMON_1() + /* We don't touch AMR here, we never go to virtual mode */ - EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN) - EXCEPTION_PROLOG_COMMON_3(0xe60) + INT_COMMON 0xe60, PACA_EXGEN, 0, 0, 0, 0, 0 + addi r3,r1,STACK_FRAME_OVERHEAD bl hmi_exception_realmode cmpdi cr0,r3,0 @@ -1734,23 +1613,25 @@ EXC_COMMON_BEGIN(hmi_exception_early_common) * firmware. */ EXCEPTION_RESTORE_REGS EXC_HV - EXCEPTION_PROLOG_0 PACA_EXGEN - EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, 0xe60, 0, 0, IRQS_DISABLED - EXCEPTION_PROLOG_2_REAL hmi_exception_common, EXC_HV, 1 + INT_HANDLER hmi_exception, 0xe60, hsrr=EXC_HV, bitmask=IRQS_DISABLED, kvm=1 EXC_COMMON_BEGIN(hmi_exception_common) - EXCEPTION_COMMON(PACA_EXGEN, 0xe60) + INT_COMMON 0xe60, PACA_EXGEN, 1, 1, 1, 0, 0 FINISH_NAP - bl save_nvgprs - RECONCILE_IRQ_STATE(r10, r11) RUNLATCH_ON + bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD bl handle_hmi_exception b ret_from_except -EXC_REAL_OOL_MASKABLE_HV(h_doorbell, 0xe80, 0x20, IRQS_DISABLED) -EXC_VIRT_OOL_MASKABLE_HV(h_doorbell, 0x4e80, 0x20, 0xe80, IRQS_DISABLED) -TRAMP_KVM_HV(PACA_EXGEN, 0xe80) + +EXC_REAL_BEGIN(h_doorbell, 0xe80, 0x20) + INT_HANDLER h_doorbell, 0xe80, ool=1, hsrr=EXC_HV, bitmask=IRQS_DISABLED, kvm=1 +EXC_REAL_END(h_doorbell, 0xe80, 0x20) +EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20) + INT_HANDLER h_doorbell, 0xe80, ool=1, virt=1, hsrr=EXC_HV, bitmask=IRQS_DISABLED, kvm=1 +EXC_VIRT_END(h_doorbell, 0x4e80, 0x20) +INT_KVM_HANDLER h_doorbell, 0xe80, EXC_HV, PACA_EXGEN, 0 #ifdef CONFIG_PPC_DOORBELL EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, doorbell_exception) #else @@ -1758,9 +1639,13 @@ EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, unknown_exception) #endif -EXC_REAL_OOL_MASKABLE_HV(h_virt_irq, 0xea0, 0x20, IRQS_DISABLED) -EXC_VIRT_OOL_MASKABLE_HV(h_virt_irq, 0x4ea0, 0x20, 0xea0, IRQS_DISABLED) -TRAMP_KVM_HV(PACA_EXGEN, 0xea0) +EXC_REAL_BEGIN(h_virt_irq, 0xea0, 0x20) + INT_HANDLER h_virt_irq, 0xea0, ool=1, hsrr=EXC_HV, bitmask=IRQS_DISABLED, kvm=1 +EXC_REAL_END(h_virt_irq, 0xea0, 0x20) +EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20) + INT_HANDLER h_virt_irq, 0xea0, ool=1, virt=1, hsrr=EXC_HV, bitmask=IRQS_DISABLED, kvm=1 +EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20) +INT_KVM_HANDLER h_virt_irq, 0xea0, EXC_HV, PACA_EXGEN, 0 EXC_COMMON_ASYNC(h_virt_irq_common, 0xea0, do_IRQ) @@ -1770,17 +1655,25 @@ EXC_REAL_NONE(0xee0, 0x20) EXC_VIRT_NONE(0x4ee0, 0x20) -EXC_REAL_OOL_MASKABLE(performance_monitor, 0xf00, 0x20, IRQS_PMI_DISABLED) -EXC_VIRT_OOL_MASKABLE(performance_monitor, 0x4f00, 0x20, 0xf00, IRQS_PMI_DISABLED) -TRAMP_KVM(PACA_EXGEN, 0xf00) +EXC_REAL_BEGIN(performance_monitor, 0xf00, 0x20) + INT_HANDLER performance_monitor, 0xf00, ool=1, bitmask=IRQS_PMI_DISABLED, 
kvm=1 +EXC_REAL_END(performance_monitor, 0xf00, 0x20) +EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20) + INT_HANDLER performance_monitor, 0xf00, ool=1, virt=1, bitmask=IRQS_PMI_DISABLED +EXC_VIRT_END(performance_monitor, 0x4f00, 0x20) +INT_KVM_HANDLER performance_monitor, 0xf00, EXC_STD, PACA_EXGEN, 0 EXC_COMMON_ASYNC(performance_monitor_common, 0xf00, performance_monitor_exception) -EXC_REAL_OOL(altivec_unavailable, 0xf20, 0x20) -EXC_VIRT_OOL(altivec_unavailable, 0x4f20, 0x20, 0xf20) -TRAMP_KVM(PACA_EXGEN, 0xf20) +EXC_REAL_BEGIN(altivec_unavailable, 0xf20, 0x20) + INT_HANDLER altivec_unavailable, 0xf20, ool=1, kvm=1 +EXC_REAL_END(altivec_unavailable, 0xf20, 0x20) +EXC_VIRT_BEGIN(altivec_unavailable, 0x4f20, 0x20) + INT_HANDLER altivec_unavailable, 0xf20, ool=1, virt=1 +EXC_VIRT_END(altivec_unavailable, 0x4f20, 0x20) +INT_KVM_HANDLER altivec_unavailable, 0xf20, EXC_STD, PACA_EXGEN, 0 EXC_COMMON_BEGIN(altivec_unavailable_common) - EXCEPTION_COMMON(PACA_EXGEN, 0xf20) + INT_COMMON 0xf20, PACA_EXGEN, 1, 1, 0, 0, 0 #ifdef CONFIG_ALTIVEC BEGIN_FTR_SECTION beq 1f @@ -1813,11 +1706,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) b ret_from_except -EXC_REAL_OOL(vsx_unavailable, 0xf40, 0x20) -EXC_VIRT_OOL(vsx_unavailable, 0x4f40, 0x20, 0xf40) -TRAMP_KVM(PACA_EXGEN, 0xf40) +EXC_REAL_BEGIN(vsx_unavailable, 0xf40, 0x20) + INT_HANDLER vsx_unavailable, 0xf40, ool=1, kvm=1 +EXC_REAL_END(vsx_unavailable, 0xf40, 0x20) +EXC_VIRT_BEGIN(vsx_unavailable, 0x4f40, 0x20) + INT_HANDLER vsx_unavailable, 0xf40, ool=1, virt=1 +EXC_VIRT_END(vsx_unavailable, 0x4f40, 0x20) +INT_KVM_HANDLER vsx_unavailable, 0xf40, EXC_STD, PACA_EXGEN, 0 EXC_COMMON_BEGIN(vsx_unavailable_common) - EXCEPTION_COMMON(PACA_EXGEN, 0xf40) + INT_COMMON 0xf40, PACA_EXGEN, 1, 1, 0, 0, 0 #ifdef CONFIG_VSX BEGIN_FTR_SECTION beq 1f @@ -1849,15 +1746,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) b ret_from_except -EXC_REAL_OOL(facility_unavailable, 0xf60, 0x20) -EXC_VIRT_OOL(facility_unavailable, 0x4f60, 0x20, 0xf60) -TRAMP_KVM(PACA_EXGEN, 0xf60) +EXC_REAL_BEGIN(facility_unavailable, 0xf60, 0x20) + INT_HANDLER facility_unavailable, 0xf60, ool=1, kvm=1 +EXC_REAL_END(facility_unavailable, 0xf60, 0x20) +EXC_VIRT_BEGIN(facility_unavailable, 0x4f60, 0x20) + INT_HANDLER facility_unavailable, 0xf60, ool=1, virt=1 +EXC_VIRT_END(facility_unavailable, 0x4f60, 0x20) +INT_KVM_HANDLER facility_unavailable, 0xf60, EXC_STD, PACA_EXGEN, 0 EXC_COMMON(facility_unavailable_common, 0xf60, facility_unavailable_exception) -EXC_REAL_OOL_HV(h_facility_unavailable, 0xf80, 0x20) -EXC_VIRT_OOL_HV(h_facility_unavailable, 0x4f80, 0x20, 0xf80) -TRAMP_KVM_HV(PACA_EXGEN, 0xf80) +EXC_REAL_BEGIN(h_facility_unavailable, 0xf80, 0x20) + INT_HANDLER h_facility_unavailable, 0xf80, ool=1, hsrr=EXC_HV, kvm=1 +EXC_REAL_END(h_facility_unavailable, 0xf80, 0x20) +EXC_VIRT_BEGIN(h_facility_unavailable, 0x4f80, 0x20) + INT_HANDLER h_facility_unavailable, 0xf80, ool=1, virt=1, hsrr=EXC_HV, kvm=1 +EXC_VIRT_END(h_facility_unavailable, 0x4f80, 0x20) +INT_KVM_HANDLER h_facility_unavailable, 0xf80, EXC_HV, PACA_EXGEN, 0 EXC_COMMON(h_facility_unavailable_common, 0xf80, facility_unavailable_exception) @@ -1874,9 +1779,11 @@ EXC_REAL_NONE(0x1100, 0x100) EXC_VIRT_NONE(0x5100, 0x100) #ifdef CONFIG_CBE_RAS -EXC_REAL_HV(cbe_system_error, 0x1200, 0x100) +EXC_REAL_BEGIN(cbe_system_error, 0x1200, 0x100) + INT_HANDLER cbe_system_error, 0x1200, ool=1, hsrr=EXC_HV, kvm=1 +EXC_REAL_END(cbe_system_error, 0x1200, 0x100) EXC_VIRT_NONE(0x5200, 0x100) -TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1200) +INT_KVM_HANDLER cbe_system_error, 
0x1200, EXC_HV, PACA_EXGEN, 1 EXC_COMMON(cbe_system_error_common, 0x1200, cbe_system_error_exception) #else /* CONFIG_CBE_RAS */ EXC_REAL_NONE(0x1200, 0x100) @@ -1884,37 +1791,43 @@ EXC_VIRT_NONE(0x5200, 0x100) #endif -EXC_REAL(instruction_breakpoint, 0x1300, 0x100) -EXC_VIRT(instruction_breakpoint, 0x5300, 0x100, 0x1300) -TRAMP_KVM_SKIP(PACA_EXGEN, 0x1300) +EXC_REAL_BEGIN(instruction_breakpoint, 0x1300, 0x100) + INT_HANDLER instruction_breakpoint, 0x1300, kvm=1 +EXC_REAL_END(instruction_breakpoint, 0x1300, 0x100) +EXC_VIRT_BEGIN(instruction_breakpoint, 0x5300, 0x100) + INT_HANDLER instruction_breakpoint, 0x1300, virt=1 +EXC_VIRT_END(instruction_breakpoint, 0x5300, 0x100) +INT_KVM_HANDLER instruction_breakpoint, 0x1300, EXC_STD, PACA_EXGEN, 1 EXC_COMMON(instruction_breakpoint_common, 0x1300, instruction_breakpoint_exception) + EXC_REAL_NONE(0x1400, 0x100) EXC_VIRT_NONE(0x5400, 0x100) EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100) - EXCEPTION_PROLOG_0 PACA_EXGEN - EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 0, 0x1500, 0, 0, 0 - + INT_HANDLER denorm_exception_hv, 0x1500, early=2, hsrr=EXC_HV #ifdef CONFIG_PPC_DENORMALISATION mfspr r10,SPRN_HSRR1 andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */ bne+ denorm_assist #endif - - KVMTEST EXC_HV 0x1500 - EXCEPTION_PROLOG_2_REAL denorm_common, EXC_HV, 1 + KVMTEST denorm_exception_hv, EXC_HV 0x1500 + INT_SAVE_SRR_AND_JUMP denorm_common, EXC_HV, 1 EXC_REAL_END(denorm_exception_hv, 0x1500, 0x100) #ifdef CONFIG_PPC_DENORMALISATION EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100) - b exc_real_0x1500_denorm_exception_hv + INT_HANDLER denorm_exception, 0x1500, 0, 2, 1, EXC_HV, PACA_EXGEN, 1, 0, 0, 0, 0 + mfspr r10,SPRN_HSRR1 + andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */ + bne+ denorm_assist + INT_VIRT_SAVE_SRR_AND_JUMP denorm_common, EXC_HV EXC_VIRT_END(denorm_exception, 0x5500, 0x100) #else EXC_VIRT_NONE(0x5500, 0x100) #endif -TRAMP_KVM_HV(PACA_EXGEN, 0x1500) +INT_KVM_HANDLER denorm_exception_hv, 0x1500, EXC_HV, PACA_EXGEN, 0 #ifdef CONFIG_PPC_DENORMALISATION TRAMP_REAL_BEGIN(denorm_assist) @@ -1989,9 +1902,11 @@ EXC_COMMON(denorm_common, 0x1500, unknown_exception) #ifdef CONFIG_CBE_RAS -EXC_REAL_HV(cbe_maintenance, 0x1600, 0x100) +EXC_REAL_BEGIN(cbe_maintenance, 0x1600, 0x100) + INT_HANDLER cbe_maintenance, 0x1600, ool=1, hsrr=EXC_HV, kvm=1 +EXC_REAL_END(cbe_maintenance, 0x1600, 0x100) EXC_VIRT_NONE(0x5600, 0x100) -TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1600) +INT_KVM_HANDLER cbe_maintenance, 0x1600, EXC_HV, PACA_EXGEN, 1 EXC_COMMON(cbe_maintenance_common, 0x1600, cbe_maintenance_exception) #else /* CONFIG_CBE_RAS */ EXC_REAL_NONE(0x1600, 0x100) @@ -1999,9 +1914,13 @@ EXC_VIRT_NONE(0x5600, 0x100) #endif -EXC_REAL(altivec_assist, 0x1700, 0x100) -EXC_VIRT(altivec_assist, 0x5700, 0x100, 0x1700) -TRAMP_KVM(PACA_EXGEN, 0x1700) +EXC_REAL_BEGIN(altivec_assist, 0x1700, 0x100) + INT_HANDLER altivec_assist, 0x1700, kvm=1 +EXC_REAL_END(altivec_assist, 0x1700, 0x100) +EXC_VIRT_BEGIN(altivec_assist, 0x5700, 0x100) + INT_HANDLER altivec_assist, 0x1700, virt=1 +EXC_VIRT_END(altivec_assist, 0x5700, 0x100) +INT_KVM_HANDLER altivec_assist, 0x1700, EXC_STD, PACA_EXGEN, 0 #ifdef CONFIG_ALTIVEC EXC_COMMON(altivec_assist_common, 0x1700, altivec_assist_exception) #else @@ -2010,15 +1929,18 @@ EXC_COMMON(altivec_assist_common, 0x1700, unknown_exception) #ifdef CONFIG_CBE_RAS -EXC_REAL_HV(cbe_thermal, 0x1800, 0x100) +EXC_REAL_BEGIN(cbe_thermal, 0x1800, 0x100) + INT_HANDLER cbe_thermal, 0x1800, ool=1, hsrr=EXC_HV, kvm=1 +EXC_REAL_END(cbe_thermal, 0x1800, 0x100) EXC_VIRT_NONE(0x5800, 0x100) 
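The 0x1500 dispatch above takes the fast denorm_assist path only when HSRR1 flags a denormalised operand; otherwise it runs the KVM test and falls through to denorm_common. A minimal C model of that routing, for illustration only (the SKETCH_HSRR1_DENORM value, the enum, and the function name are assumptions of the sketch, not kernel symbols):

    #include <stdbool.h>

    #define SKETCH_HSRR1_DENORM 0x00100000UL  /* assumed denorm flag bit, sketch only */

    enum denorm_route { DENORM_ASSIST, DENORM_TO_KVM, DENORM_COMMON };

    /* Model of the real-mode 0x1500 entry: denormalised operands take the
     * assist fast path, guests are routed through the KVM test, everything
     * else saves SRR0/1 and enters the common handler. */
    enum denorm_route route_denorm(unsigned long hsrr1, bool in_guest)
    {
    	if (hsrr1 & SKETCH_HSRR1_DENORM)
    		return DENORM_ASSIST;
    	if (in_guest)
    		return DENORM_TO_KVM;
    	return DENORM_COMMON;
    }

Calling route_denorm(SKETCH_HSRR1_DENORM, false) returns DENORM_ASSIST, mirroring the bne+ denorm_assist fast path in the assembly above.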
-TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1800) +INT_KVM_HANDLER cbe_thermal, 0x1800, EXC_HV, PACA_EXGEN, 1 EXC_COMMON(cbe_thermal_common, 0x1800, cbe_thermal_exception) #else /* CONFIG_CBE_RAS */ EXC_REAL_NONE(0x1800, 0x100) EXC_VIRT_NONE(0x5800, 0x100) #endif + #ifdef CONFIG_PPC_WATCHDOG #define MASKED_DEC_HANDLER_LABEL 3f @@ -2028,7 +1950,7 @@ EXC_VIRT_NONE(0x5800, 0x100) std r12,PACA_EXGEN+EX_R12(r13); \ GET_SCRATCH0(r10); \ std r10,PACA_EXGEN+EX_R13(r13); \ - EXCEPTION_PROLOG_2_REAL soft_nmi_common, _H, 1 + INT_SAVE_SRR_AND_JUMP soft_nmi_common, _H, 1 /* * Branch to soft_nmi_interrupt using the emergency stack. The emergency @@ -2043,9 +1965,8 @@ EXC_COMMON_BEGIN(soft_nmi_common) mr r10,r1 ld r1,PACAEMERGSP(r13) subi r1,r1,INT_FRAME_SIZE - EXCEPTION_COMMON_STACK(PACA_EXGEN, 0x900) + INT_COMMON 0x900, PACA_EXGEN, 0, 1, 1, 0, 0 bl save_nvgprs - RECONCILE_IRQ_STATE(r10, r11) addi r3,r1,STACK_FRAME_OVERHEAD bl soft_nmi_interrupt b ret_from_except @@ -2302,6 +2223,35 @@ CLOSE_FIXED_SECTION(virt_trampolines); USE_TEXT_SECTION() +/* MSR[RI] should be clear because this uses SRR[01] */ +enable_machine_check: + mflr r0 + bcl 20,31,$+4 +0: mflr r3 + addi r3,r3,(1f - 0b) + mtspr SPRN_SRR0,r3 + mfmsr r3 + ori r3,r3,MSR_ME + mtspr SPRN_SRR1,r3 + RFI_TO_KERNEL +1: mtlr r0 + blr + +/* MSR[RI] should be clear because this uses SRR[01] */ +disable_machine_check: + mflr r0 + bcl 20,31,$+4 +0: mflr r3 + addi r3,r3,(1f - 0b) + mtspr SPRN_SRR0,r3 + mfmsr r3 + li r4,MSR_ME + andc r3,r3,r4 + mtspr SPRN_SRR1,r3 + RFI_TO_KERNEL +1: mtlr r0 + blr + /* * Hash table stuff */ @@ -2310,7 +2260,7 @@ do_hash_page: #ifdef CONFIG_PPC_BOOK3S_64 lis r0,(DSISR_BAD_FAULT_64S | DSISR_DABRMATCH | DSISR_KEYFAULT)@h ori r0,r0,DSISR_BAD_FAULT_64S@l - and. r0,r4,r0 /* weird error? */ + and. r0,r5,r0 /* weird error? */ bne- handle_page_fault /* if not, try to insert a HPTE */ ld r11, PACA_THREAD_INFO(r13) lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ @@ -2318,15 +2268,13 @@ do_hash_page: bne 77f /* then don't call hash_page now */ /* - * r3 contains the faulting address - * r4 msr - * r5 contains the trap number - * r6 contains dsisr + * r3 contains the trap number + * r4 contains the faulting address + * r5 contains dsisr + * r6 msr * * at return r3 = 0 for success, 1 for page fault, negative for error */ - mr r4,r12 - ld r6,_DSISR(r1) bl __hash_page /* build HPTE if possible */ cmpdi r3,0 /* see if __hash_page succeeded */ @@ -2336,16 +2284,15 @@ do_hash_page: /* Error */ blt- 13f - /* Reload DSISR into r4 for the DABR check below */ - ld r4,_DSISR(r1) + /* Reload DAR/DSISR into r4/r5 for the DABR check below */ + ld r4,_DAR(r1) + ld r5,_DSISR(r1) #endif /* CONFIG_PPC_BOOK3S_64 */ /* Here we have a page fault that hash_page can't handle. */ handle_page_fault: -11: andis. r0,r4,DSISR_DABRMATCH@h +11: andis. r0,r5,DSISR_DABRMATCH@h bne- handle_dabr_fault - ld r4,_DAR(r1) - ld r5,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD bl do_page_fault cmpdi r3,0 @@ -2353,7 +2300,7 @@ handle_page_fault: bl save_nvgprs mr r5,r3 addi r3,r1,STACK_FRAME_OVERHEAD - lwz r4,_DAR(r1) + ld r4,_DAR(r1) bl bad_page_fault b ret_from_except @@ -2392,7 +2339,6 @@ handle_dabr_fault: * the access, or panic if there isn't a handler. 
*/ 77: bl save_nvgprs - mr r4,r3 addi r3,r1,STACK_FRAME_OVERHEAD li r5,SIGSEGV bl bad_page_fault diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 4eab97292cc2ca..ed59855430b93c 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -28,24 +28,22 @@ #include #include #include -#include #include +#include #include static struct fw_dump fw_dump; -static struct fadump_mem_struct fdm; -static const struct fadump_mem_struct *fdm_active; -#ifdef CONFIG_CMA -static struct cma *fadump_cma; -#endif +static void __init fadump_reserve_crash_area(u64 base); + +#ifndef CONFIG_PRESERVE_FA_DUMP static DEFINE_MUTEX(fadump_mutex); -struct fad_crash_memory_ranges *crash_memory_ranges; -int crash_memory_ranges_size; -int crash_mem_ranges; -int max_crash_mem_ranges; +struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0 }; +struct fadump_mrange_info reserved_mrange_info = { "reserved", NULL, 0, 0, 0 }; #ifdef CONFIG_CMA +static struct cma *fadump_cma; + /* * fadump_cma_init() - Initialize CMA area from a fadump reserved memory * @@ -107,84 +105,45 @@ static int __init fadump_cma_init(void) { return 1; } #endif /* CONFIG_CMA */ /* Scan the Firmware Assisted dump configuration details. */ -int __init early_init_dt_scan_fw_dump(unsigned long node, - const char *uname, int depth, void *data) +int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname, + int depth, void *data) { - const __be32 *sections; - int i, num_sections; - int size; - const __be32 *token; - - if (depth != 1 || strcmp(uname, "rtas") != 0) + if (depth != 1) return 0; - /* - * Check if Firmware Assisted dump is supported. if yes, check - * if dump has been initiated on last reboot. - */ - token = of_get_flat_dt_prop(node, "ibm,configure-kernel-dump", NULL); - if (!token) + if (strcmp(uname, "rtas") == 0) { + rtas_fadump_dt_scan(&fw_dump, node); return 1; + } - fw_dump.fadump_supported = 1; - fw_dump.ibm_configure_kernel_dump = be32_to_cpu(*token); - - /* - * The 'ibm,kernel-dump' rtas node is present only if there is - * dump data waiting for us. - */ - fdm_active = of_get_flat_dt_prop(node, "ibm,kernel-dump", NULL); - if (fdm_active) - fw_dump.dump_active = 1; - - /* Get the sizes required to store dump data for the firmware provided - * dump sections. - * For each dump section type supported, a 32bit cell which defines - * the ID of a supported section followed by two 32 bit cells which - * gives teh size of the section in bytes. - */ - sections = of_get_flat_dt_prop(node, "ibm,configure-kernel-dump-sizes", - &size); - - if (!sections) + if (strcmp(uname, "ibm,opal") == 0) { + opal_fadump_dt_scan(&fw_dump, node); return 1; - - num_sections = size / (3 * sizeof(u32)); - - for (i = 0; i < num_sections; i++, sections += 3) { - u32 type = (u32)of_read_number(sections, 1); - - switch (type) { - case FADUMP_CPU_STATE_DATA: - fw_dump.cpu_state_data_size = - of_read_ulong(§ions[1], 2); - break; - case FADUMP_HPTE_REGION: - fw_dump.hpte_region_size = - of_read_ulong(§ions[1], 2); - break; - } } - return 1; + return 0; } /* * If fadump is registered, check if the memory provided * falls within boot memory area and reserved memory area. 
*/ -int is_fadump_memory_area(u64 addr, ulong size) +int is_fadump_memory_area(u64 addr, unsigned long size) { - u64 d_start = fw_dump.reserve_dump_area_start; - u64 d_end = d_start + fw_dump.reserve_dump_area_size; + u64 d_start, d_end; if (!fw_dump.dump_registered) return 0; + if (!size) + return 0; + + d_start = fw_dump.reserve_dump_area_start; + d_end = d_start + fw_dump.reserve_dump_area_size; if (((addr + size) > d_start) && (addr <= d_end)) return 1; - return (addr + size) > RMA_START && addr <= fw_dump.boot_memory_size; + return (addr <= fw_dump.boot_mem_top); } int should_fadump_crash(void) @@ -200,31 +159,29 @@ int is_fadump_active(void) } /* - * Returns 1, if there are no holes in boot memory area, - * 0 otherwise. + * Returns true, if there are no holes in memory area between d_start to d_end, + * false otherwise. */ -static int is_boot_memory_area_contiguous(void) +static bool is_fadump_mem_area_contiguous(u64 d_start, u64 d_end) { struct memblock_region *reg; - unsigned long tstart, tend; - unsigned long start_pfn = PHYS_PFN(RMA_START); - unsigned long end_pfn = PHYS_PFN(RMA_START + fw_dump.boot_memory_size); - unsigned int ret = 0; + bool ret = false; + u64 start, end; for_each_memblock(memory, reg) { - tstart = max(start_pfn, memblock_region_memory_base_pfn(reg)); - tend = min(end_pfn, memblock_region_memory_end_pfn(reg)); - if (tstart < tend) { - /* Memory hole from start_pfn to tstart */ - if (tstart > start_pfn) + start = max_t(u64, d_start, reg->base); + end = min_t(u64, d_end, (reg->base + reg->size)); + if (d_start < end) { + /* Memory hole from d_start to start */ + if (start > d_start) break; - if (tend == end_pfn) { - ret = 1; + if (end == d_end) { + ret = true; break; } - start_pfn = tend + 1; + d_start = end + 1; } } @@ -232,37 +189,45 @@ static int is_boot_memory_area_contiguous(void) } /* - * Returns true, if there are no holes in reserved memory area, + * Returns true, if there are no holes in boot memory area, * false otherwise. */ -static bool is_reserved_memory_area_contiguous(void) +bool is_fadump_boot_mem_contiguous(void) { - struct memblock_region *reg; - unsigned long start, end; - unsigned long d_start = fw_dump.reserve_dump_area_start; - unsigned long d_end = d_start + fw_dump.reserve_dump_area_size; - - for_each_memblock(memory, reg) { - start = max(d_start, (unsigned long)reg->base); - end = min(d_end, (unsigned long)(reg->base + reg->size)); - if (d_start < end) { - /* Memory hole from d_start to start */ - if (start > d_start) - break; + unsigned long d_start, d_end; + bool ret = false; + int i; - if (end == d_end) - return true; + for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) { + d_start = fw_dump.boot_mem_addr[i]; + d_end = d_start + fw_dump.boot_mem_sz[i]; - d_start = end + 1; - } + ret = is_fadump_mem_area_contiguous(d_start, d_end); + if (!ret) + break; } - return false; + return ret; +} + +/* + * Returns true, if there are no holes in reserved memory area, + * false otherwise. + */ +bool is_fadump_reserved_mem_contiguous(void) +{ + u64 d_start, d_end; + + d_start = fw_dump.reserve_dump_area_start; + d_end = d_start + fw_dump.reserve_dump_area_size; + return is_fadump_mem_area_contiguous(d_start, d_end); } /* Print firmware assisted dump configurations for debugging purpose. */ static void fadump_show_config(void) { + int i; + pr_debug("Support for firmware-assisted dump (fadump): %s\n", (fw_dump.fadump_supported ? 
"present" : "no support")); @@ -276,62 +241,13 @@ static void fadump_show_config(void) pr_debug("Dump section sizes:\n"); pr_debug(" CPU state data size: %lx\n", fw_dump.cpu_state_data_size); pr_debug(" HPTE region size : %lx\n", fw_dump.hpte_region_size); - pr_debug("Boot memory size : %lx\n", fw_dump.boot_memory_size); -} - -static unsigned long init_fadump_mem_struct(struct fadump_mem_struct *fdm, - unsigned long addr) -{ - if (!fdm) - return 0; - - memset(fdm, 0, sizeof(struct fadump_mem_struct)); - addr = addr & PAGE_MASK; - - fdm->header.dump_format_version = cpu_to_be32(0x00000001); - fdm->header.dump_num_sections = cpu_to_be16(3); - fdm->header.dump_status_flag = 0; - fdm->header.offset_first_dump_section = - cpu_to_be32((u32)offsetof(struct fadump_mem_struct, cpu_state_data)); - - /* - * Fields for disk dump option. - * We are not using disk dump option, hence set these fields to 0. - */ - fdm->header.dd_block_size = 0; - fdm->header.dd_block_offset = 0; - fdm->header.dd_num_blocks = 0; - fdm->header.dd_offset_disk_path = 0; - - /* set 0 to disable an automatic dump-reboot. */ - fdm->header.max_time_auto = 0; - - /* Kernel dump sections */ - /* cpu state data section. */ - fdm->cpu_state_data.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG); - fdm->cpu_state_data.source_data_type = cpu_to_be16(FADUMP_CPU_STATE_DATA); - fdm->cpu_state_data.source_address = 0; - fdm->cpu_state_data.source_len = cpu_to_be64(fw_dump.cpu_state_data_size); - fdm->cpu_state_data.destination_address = cpu_to_be64(addr); - addr += fw_dump.cpu_state_data_size; - - /* hpte region section */ - fdm->hpte_region.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG); - fdm->hpte_region.source_data_type = cpu_to_be16(FADUMP_HPTE_REGION); - fdm->hpte_region.source_address = 0; - fdm->hpte_region.source_len = cpu_to_be64(fw_dump.hpte_region_size); - fdm->hpte_region.destination_address = cpu_to_be64(addr); - addr += fw_dump.hpte_region_size; - - /* RMA region section */ - fdm->rmr_region.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG); - fdm->rmr_region.source_data_type = cpu_to_be16(FADUMP_REAL_MODE_REGION); - fdm->rmr_region.source_address = cpu_to_be64(RMA_START); - fdm->rmr_region.source_len = cpu_to_be64(fw_dump.boot_memory_size); - fdm->rmr_region.destination_address = cpu_to_be64(addr); - addr += fw_dump.boot_memory_size; - - return addr; + pr_debug(" Boot memory size : %lx\n", fw_dump.boot_memory_size); + pr_debug(" Boot memory top : %llx\n", fw_dump.boot_mem_top); + pr_debug("Boot memory regions cnt: %llx\n", fw_dump.boot_mem_regs_cnt); + for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) { + pr_debug("[%03d] base = %llx, size = %llx\n", i, + fw_dump.boot_mem_addr[i], fw_dump.boot_mem_sz[i]); + } } /** @@ -349,10 +265,10 @@ static unsigned long init_fadump_mem_struct(struct fadump_mem_struct *fdm, * that is required for a kernel to boot successfully. * */ -static inline unsigned long fadump_calculate_reserve_size(void) +static inline u64 fadump_calculate_reserve_size(void) { + u64 base, size, bootmem_min; int ret; - unsigned long long base, size; if (fw_dump.reserve_bootvar) pr_warn("'fadump_reserve_mem=' parameter is deprecated in favor of 'crashkernel=' parameter.\n"); @@ -402,7 +318,8 @@ static inline unsigned long fadump_calculate_reserve_size(void) if (memory_limit && size > memory_limit) size = memory_limit; - return (size > MIN_BOOT_MEM ? size : MIN_BOOT_MEM); + bootmem_min = fw_dump.ops->fadump_get_bootmem_min(); + return (size > bootmem_min ? 
size : bootmem_min); } /* @@ -423,57 +340,136 @@ static unsigned long get_fadump_area_size(void) size += sizeof(struct elf_phdr) * (memblock_num_regions(memory) + 2); size = PAGE_ALIGN(size); + + /* This is to hold kernel metadata on platforms that support it */ + size += (fw_dump.ops->fadump_get_metadata_size ? + fw_dump.ops->fadump_get_metadata_size() : 0); return size; } -static void __init fadump_reserve_crash_area(unsigned long base, - unsigned long size) +static int __init add_boot_mem_region(unsigned long rstart, + unsigned long rsize) +{ + int i = fw_dump.boot_mem_regs_cnt++; + + if (fw_dump.boot_mem_regs_cnt > FADUMP_MAX_MEM_REGS) { + fw_dump.boot_mem_regs_cnt = FADUMP_MAX_MEM_REGS; + return 0; + } + + pr_debug("Added boot memory range[%d] [%#016lx-%#016lx)\n", + i, rstart, (rstart + rsize)); + fw_dump.boot_mem_addr[i] = rstart; + fw_dump.boot_mem_sz[i] = rsize; + return 1; +} + +/* + * Firmware usually has a hard limit on the data it can copy per region. + * Honour that by splitting a memory range into multiple regions. + */ +static int __init add_boot_mem_regions(unsigned long mstart, + unsigned long msize) { + unsigned long rstart, rsize, max_size; + int ret = 1; + + rstart = mstart; + max_size = fw_dump.max_copy_size ? fw_dump.max_copy_size : msize; + while (msize) { + if (msize > max_size) + rsize = max_size; + else + rsize = msize; + + ret = add_boot_mem_region(rstart, rsize); + if (!ret) + break; + + msize -= rsize; + rstart += rsize; + } + + return ret; +} + +static int __init fadump_get_boot_mem_regions(void) +{ + unsigned long base, size, cur_size, hole_size, last_end; + unsigned long mem_size = fw_dump.boot_memory_size; struct memblock_region *reg; - unsigned long mstart, mend, msize; + int ret = 1; + + fw_dump.boot_mem_regs_cnt = 0; + last_end = 0; + hole_size = 0; + cur_size = 0; for_each_memblock(memory, reg) { - mstart = max_t(unsigned long, base, reg->base); - mend = reg->base + reg->size; - mend = min(base + size, mend); - - if (mstart < mend) { - msize = mend - mstart; - memblock_reserve(mstart, msize); - pr_info("Reserved %ldMB of memory at %#016lx for saving crash dump\n", - (msize >> 20), mstart); + base = reg->base; + size = reg->size; + hole_size += (base - last_end); + + if ((cur_size + size) >= mem_size) { + size = (mem_size - cur_size); + ret = add_boot_mem_regions(base, size); + break; } + + mem_size -= size; + cur_size += size; + ret = add_boot_mem_regions(base, size); + if (!ret) + break; + + last_end = base + size; } + fw_dump.boot_mem_top = PAGE_ALIGN(fw_dump.boot_memory_size + hole_size); + + return ret; } int __init fadump_reserve_mem(void) { - unsigned long base, size, memory_boundary; + u64 base, size, mem_boundary, bootmem_min, align = PAGE_SIZE; + bool is_memblock_bottom_up = memblock_bottom_up(); + int ret = 1; if (!fw_dump.fadump_enabled) return 0; if (!fw_dump.fadump_supported) { - printk(KERN_INFO "Firmware-assisted dump is not supported on" - " this hardware\n"); - fw_dump.fadump_enabled = 0; - return 0; + pr_info("Firmware-Assisted Dump is not supported on this hardware\n"); + goto error_out; } + /* * Initialize boot memory size * If dump is active then we have already calculated the size during * first kernel. 
*/ - if (fdm_active) - fw_dump.boot_memory_size = be64_to_cpu(fdm_active->rmr_region.source_len); - else { - fw_dump.boot_memory_size = fadump_calculate_reserve_size(); + if (!fw_dump.dump_active) { + fw_dump.boot_memory_size = + PAGE_ALIGN(fadump_calculate_reserve_size()); #ifdef CONFIG_CMA - if (!fw_dump.nocma) + if (!fw_dump.nocma) { + align = FADUMP_CMA_ALIGNMENT; fw_dump.boot_memory_size = - ALIGN(fw_dump.boot_memory_size, - FADUMP_CMA_ALIGNMENT); + ALIGN(fw_dump.boot_memory_size, align); + } #endif + + bootmem_min = fw_dump.ops->fadump_get_bootmem_min(); + if (fw_dump.boot_memory_size < bootmem_min) { + pr_err("Can't enable fadump with boot memory size (0x%lx) less than 0x%llx\n", + fw_dump.boot_memory_size, bootmem_min); + goto error_out; + } + + if (!fadump_get_boot_mem_regions()) { + pr_err("Too many holes in boot memory area to enable fadump\n"); + goto error_out; + } } /* @@ -493,10 +489,13 @@ int __init fadump_reserve_mem(void) " dump, now %#016llx\n", memory_limit); } if (memory_limit) - memory_boundary = memory_limit; + mem_boundary = memory_limit; else - memory_boundary = memblock_end_of_DRAM(); + mem_boundary = memblock_end_of_DRAM(); + base = fw_dump.boot_mem_top; + size = get_fadump_area_size(); + fw_dump.reserve_dump_area_size = size; if (fw_dump.dump_active) { pr_info("Firmware-assisted dump is active.\n"); @@ -510,58 +509,55 @@ int __init fadump_reserve_mem(void) #endif /* * If last boot has crashed then reserve all the memory - * above boot_memory_size so that we don't touch it until + * above boot memory size so that we don't touch it until * dump is written to disk by userspace tool. This memory - * will be released for general use once the dump is saved. + * can be released for general use by invalidating fadump. */ - base = fw_dump.boot_memory_size; - size = memory_boundary - base; - fadump_reserve_crash_area(base, size); - - fw_dump.fadumphdr_addr = - be64_to_cpu(fdm_active->rmr_region.destination_address) + - be64_to_cpu(fdm_active->rmr_region.source_len); - pr_debug("fadumphdr_addr = %pa\n", &fw_dump.fadumphdr_addr); - fw_dump.reserve_dump_area_start = base; - fw_dump.reserve_dump_area_size = size; - } else { - size = get_fadump_area_size(); + fadump_reserve_crash_area(base); + pr_debug("fadumphdr_addr = %#016lx\n", fw_dump.fadumphdr_addr); + pr_debug("Reserve dump area start address: 0x%lx\n", + fw_dump.reserve_dump_area_start); + } else { /* * Reserve memory at an offset closer to bottom of the RAM to - * minimize the impact of memory hot-remove operation. We can't - * use memblock_find_in_range() here since it doesn't allocate - * from bottom to top. + * minimize the impact of memory hot-remove operation. */ - for (base = fw_dump.boot_memory_size; - base <= (memory_boundary - size); - base += size) { - if (memblock_is_region_memory(base, size) && - !memblock_is_region_reserved(base, size)) - break; + memblock_set_bottom_up(true); + base = memblock_find_in_range(base, mem_boundary, size, align); + + /* Restore the previous allocation mode */ + memblock_set_bottom_up(is_memblock_bottom_up); + + if (!base) { + pr_err("Failed to find memory chunk for reservation!\n"); + goto error_out; } - if ((base > (memory_boundary - size)) || - memblock_reserve(base, size)) { - pr_err("Failed to reserve memory\n"); - return 0; + fw_dump.reserve_dump_area_start = base; + + /* + * Calculate the kernel metadata address and register it with + * f/w if the platform supports. 
+ */ + if (fw_dump.ops->fadump_setup_metadata && + (fw_dump.ops->fadump_setup_metadata(&fw_dump) < 0)) + goto error_out; + + if (memblock_reserve(base, size)) { + pr_err("Failed to reserve memory!\n"); + goto error_out; } - pr_info("Reserved %ldMB of memory at %ldMB for firmware-" - "assisted dump (System RAM: %ldMB)\n", - (unsigned long)(size >> 20), - (unsigned long)(base >> 20), - (unsigned long)(memblock_phys_mem_size() >> 20)); + pr_info("Reserved %lldMB of memory at %#016llx (System RAM: %lldMB)\n", + (size >> 20), base, (memblock_phys_mem_size() >> 20)); - fw_dump.reserve_dump_area_start = base; - fw_dump.reserve_dump_area_size = size; - return fadump_cma_init(); + ret = fadump_cma_init(); } - return 1; -} -unsigned long __init arch_reserved_kernel_pages(void) -{ - return memblock_reserved_size() / PAGE_SIZE; + return ret; +error_out: + fw_dump.fadump_enabled = 0; + return 0; } /* Look for fadump= cmdline option. */ @@ -596,61 +592,6 @@ static int __init early_fadump_reserve_mem(char *p) } early_param("fadump_reserve_mem", early_fadump_reserve_mem); -static int register_fw_dump(struct fadump_mem_struct *fdm) -{ - int rc, err; - unsigned int wait_time; - - pr_debug("Registering for firmware-assisted kernel dump...\n"); - - /* TODO: Add upper time limit for the delay */ - do { - rc = rtas_call(fw_dump.ibm_configure_kernel_dump, 3, 1, NULL, - FADUMP_REGISTER, fdm, - sizeof(struct fadump_mem_struct)); - - wait_time = rtas_busy_delay_time(rc); - if (wait_time) - mdelay(wait_time); - - } while (wait_time); - - err = -EIO; - switch (rc) { - default: - pr_err("Failed to register. Unknown Error(%d).\n", rc); - break; - case -1: - printk(KERN_ERR "Failed to register firmware-assisted kernel" - " dump. Hardware Error(%d).\n", rc); - break; - case -3: - if (!is_boot_memory_area_contiguous()) - pr_err("Can't have holes in boot memory area while registering fadump\n"); - else if (!is_reserved_memory_area_contiguous()) - pr_err("Can't have holes in reserved memory area while" - " registering fadump\n"); - - printk(KERN_ERR "Failed to register firmware-assisted kernel" - " dump. 
Parameter Error(%d).\n", rc); - err = -EINVAL; - break; - case -9: - printk(KERN_ERR "firmware-assisted kernel dump is already " - " registered."); - fw_dump.dump_registered = 1; - err = -EEXIST; - break; - case 0: - printk(KERN_INFO "firmware-assisted kernel dump registration" - " is successful\n"); - fw_dump.dump_registered = 1; - err = 0; - break; - } - return err; -} - void crash_fadump(struct pt_regs *regs, const char *str) { struct fadump_crash_info_header *fdh = NULL; @@ -693,71 +634,10 @@ void crash_fadump(struct pt_regs *regs, const char *str) fdh->online_mask = *cpu_online_mask; - /* Call ibm,os-term rtas call to trigger firmware assisted dump */ - rtas_os_term((char *)str); -} - -#define GPR_MASK 0xffffff0000000000 -static inline int fadump_gpr_index(u64 id) -{ - int i = -1; - char str[3]; - - if ((id & GPR_MASK) == REG_ID("GPR")) { - /* get the digits at the end */ - id &= ~GPR_MASK; - id >>= 24; - str[2] = '\0'; - str[1] = id & 0xff; - str[0] = (id >> 8) & 0xff; - sscanf(str, "%d", &i); - if (i > 31) - i = -1; - } - return i; -} - -static inline void fadump_set_regval(struct pt_regs *regs, u64 reg_id, - u64 reg_val) -{ - int i; - - i = fadump_gpr_index(reg_id); - if (i >= 0) - regs->gpr[i] = (unsigned long)reg_val; - else if (reg_id == REG_ID("NIA")) - regs->nip = (unsigned long)reg_val; - else if (reg_id == REG_ID("MSR")) - regs->msr = (unsigned long)reg_val; - else if (reg_id == REG_ID("CTR")) - regs->ctr = (unsigned long)reg_val; - else if (reg_id == REG_ID("LR")) - regs->link = (unsigned long)reg_val; - else if (reg_id == REG_ID("XER")) - regs->xer = (unsigned long)reg_val; - else if (reg_id == REG_ID("CR")) - regs->ccr = (unsigned long)reg_val; - else if (reg_id == REG_ID("DAR")) - regs->dar = (unsigned long)reg_val; - else if (reg_id == REG_ID("DSISR")) - regs->dsisr = (unsigned long)reg_val; -} - -static struct fadump_reg_entry* -fadump_read_registers(struct fadump_reg_entry *reg_entry, struct pt_regs *regs) -{ - memset(regs, 0, sizeof(struct pt_regs)); - - while (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUEND")) { - fadump_set_regval(regs, be64_to_cpu(reg_entry->reg_id), - be64_to_cpu(reg_entry->reg_value)); - reg_entry++; - } - reg_entry++; - return reg_entry; + fw_dump.ops->fadump_trigger(fdh, str); } -static u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs) +u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs) { struct elf_prstatus prstatus; @@ -772,7 +652,7 @@ static u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs) return buf; } -static void fadump_update_elfcore_header(char *bufp) +void fadump_update_elfcore_header(char *bufp) { struct elfhdr *elf; struct elf_phdr *phdr; @@ -784,7 +664,7 @@ static void fadump_update_elfcore_header(char *bufp) phdr = (struct elf_phdr *)bufp; if (phdr->p_type == PT_NOTE) { - phdr->p_paddr = fw_dump.cpu_notes_buf; + phdr->p_paddr = __pa(fw_dump.cpu_notes_buf_vaddr); phdr->p_offset = phdr->p_paddr; phdr->p_filesz = fw_dump.cpu_notes_buf_size; phdr->p_memsz = fw_dump.cpu_notes_buf_size; @@ -792,228 +672,100 @@ static void fadump_update_elfcore_header(char *bufp) return; } -static void *fadump_cpu_notes_buf_alloc(unsigned long size) +static void *fadump_alloc_buffer(unsigned long size) { - void *vaddr; + unsigned long count, i; struct page *page; - unsigned long order, count, i; + void *vaddr; - order = get_order(size); - vaddr = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, order); + vaddr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); if (!vaddr) return NULL; - count = 1 << order; + count = 
PAGE_ALIGN(size) / PAGE_SIZE; page = virt_to_page(vaddr); for (i = 0; i < count; i++) - SetPageReserved(page + i); + mark_page_reserved(page + i); return vaddr; } -static void fadump_cpu_notes_buf_free(unsigned long vaddr, unsigned long size) +static void fadump_free_buffer(unsigned long vaddr, unsigned long size) { - struct page *page; - unsigned long order, count, i; - - order = get_order(size); - count = 1 << order; - page = virt_to_page(vaddr); - for (i = 0; i < count; i++) - ClearPageReserved(page + i); - __free_pages(page, order); + free_reserved_area((void *)vaddr, (void *)(vaddr + size), -1, NULL); } -/* - * Read CPU state dump data and convert it into ELF notes. - * The CPU dump starts with magic number "REGSAVE". NumCpusOffset should be - * used to access the data to allow for additional fields to be added without - * affecting compatibility. Each list of registers for a CPU starts with - * "CPUSTRT" and ends with "CPUEND". Each register entry is of 16 bytes, - * 8 Byte ASCII identifier and 8 Byte register value. The register entry - * with identifier "CPUSTRT" and "CPUEND" contains 4 byte cpu id as part - * of register value. For more details refer to PAPR document. - * - * Only for the crashing cpu we ignore the CPU dump data and get exact - * state from fadump crash info structure populated by first kernel at the - * time of crash. - */ -static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm) +s32 fadump_setup_cpu_notes_buf(u32 num_cpus) { - struct fadump_reg_save_area_header *reg_header; - struct fadump_reg_entry *reg_entry; - struct fadump_crash_info_header *fdh = NULL; - void *vaddr; - unsigned long addr; - u32 num_cpus, *note_buf; - struct pt_regs regs; - int i, rc = 0, cpu = 0; - - if (!fdm->cpu_state_data.bytes_dumped) - return -EINVAL; - - addr = be64_to_cpu(fdm->cpu_state_data.destination_address); - vaddr = __va(addr); - - reg_header = vaddr; - if (be64_to_cpu(reg_header->magic_number) != REGSAVE_AREA_MAGIC) { - printk(KERN_ERR "Unable to read register save area.\n"); - return -ENOENT; - } - pr_debug("--------CPU State Data------------\n"); - pr_debug("Magic Number: %llx\n", be64_to_cpu(reg_header->magic_number)); - pr_debug("NumCpuOffset: %x\n", be32_to_cpu(reg_header->num_cpu_offset)); - - vaddr += be32_to_cpu(reg_header->num_cpu_offset); - num_cpus = be32_to_cpu(*((__be32 *)(vaddr))); - pr_debug("NumCpus : %u\n", num_cpus); - vaddr += sizeof(u32); - reg_entry = (struct fadump_reg_entry *)vaddr; - /* Allocate buffer to hold cpu crash notes. 
*/ fw_dump.cpu_notes_buf_size = num_cpus * sizeof(note_buf_t); fw_dump.cpu_notes_buf_size = PAGE_ALIGN(fw_dump.cpu_notes_buf_size); - note_buf = fadump_cpu_notes_buf_alloc(fw_dump.cpu_notes_buf_size); - if (!note_buf) { - printk(KERN_ERR "Failed to allocate 0x%lx bytes for " - "cpu notes buffer\n", fw_dump.cpu_notes_buf_size); + fw_dump.cpu_notes_buf_vaddr = + (unsigned long)fadump_alloc_buffer(fw_dump.cpu_notes_buf_size); + if (!fw_dump.cpu_notes_buf_vaddr) { + pr_err("Failed to allocate %ld bytes for CPU notes buffer\n", + fw_dump.cpu_notes_buf_size); return -ENOMEM; } - fw_dump.cpu_notes_buf = __pa(note_buf); - - pr_debug("Allocated buffer for cpu notes of size %ld at %p\n", - (num_cpus * sizeof(note_buf_t)), note_buf); - if (fw_dump.fadumphdr_addr) - fdh = __va(fw_dump.fadumphdr_addr); - - for (i = 0; i < num_cpus; i++) { - if (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUSTRT")) { - printk(KERN_ERR "Unable to read CPU state data\n"); - rc = -ENOENT; - goto error_out; - } - /* Lower 4 bytes of reg_value contains logical cpu id */ - cpu = be64_to_cpu(reg_entry->reg_value) & FADUMP_CPU_ID_MASK; - if (fdh && !cpumask_test_cpu(cpu, &fdh->online_mask)) { - SKIP_TO_NEXT_CPU(reg_entry); - continue; - } - pr_debug("Reading register data for cpu %d...\n", cpu); - if (fdh && fdh->crashing_cpu == cpu) { - regs = fdh->regs; - note_buf = fadump_regs_to_elf_notes(note_buf, ®s); - SKIP_TO_NEXT_CPU(reg_entry); - } else { - reg_entry++; - reg_entry = fadump_read_registers(reg_entry, ®s); - note_buf = fadump_regs_to_elf_notes(note_buf, ®s); - } - } - final_note(note_buf); - - if (fdh) { - pr_debug("Updating elfcore header (%llx) with cpu notes\n", - fdh->elfcorehdr_addr); - fadump_update_elfcore_header((char *)__va(fdh->elfcorehdr_addr)); - } + pr_debug("Allocated buffer for cpu notes of size %ld at 0x%lx\n", + fw_dump.cpu_notes_buf_size, + fw_dump.cpu_notes_buf_vaddr); return 0; - -error_out: - fadump_cpu_notes_buf_free((unsigned long)__va(fw_dump.cpu_notes_buf), - fw_dump.cpu_notes_buf_size); - fw_dump.cpu_notes_buf = 0; - fw_dump.cpu_notes_buf_size = 0; - return rc; - } -/* - * Validate and process the dump data stored by firmware before exporting - * it through '/proc/vmcore'. - */ -static int __init process_fadump(const struct fadump_mem_struct *fdm_active) +void fadump_free_cpu_notes_buf(void) { - struct fadump_crash_info_header *fdh; - int rc = 0; - - if (!fdm_active || !fw_dump.fadumphdr_addr) - return -EINVAL; - - /* Check if the dump data is valid. */ - if ((be16_to_cpu(fdm_active->header.dump_status_flag) == FADUMP_ERROR_FLAG) || - (fdm_active->cpu_state_data.error_flags != 0) || - (fdm_active->rmr_region.error_flags != 0)) { - printk(KERN_ERR "Dump taken by platform is not valid\n"); - return -EINVAL; - } - if ((fdm_active->rmr_region.bytes_dumped != - fdm_active->rmr_region.source_len) || - !fdm_active->cpu_state_data.bytes_dumped) { - printk(KERN_ERR "Dump taken by platform is incomplete\n"); - return -EINVAL; - } - - /* Validate the fadump crash info header */ - fdh = __va(fw_dump.fadumphdr_addr); - if (fdh->magic_number != FADUMP_CRASH_INFO_MAGIC) { - printk(KERN_ERR "Crash info header is not valid.\n"); - return -EINVAL; - } - - rc = fadump_build_cpu_notes(fdm_active); - if (rc) - return rc; - - /* - * We are done validating dump info and elfcore header is now ready - * to be exported. set elfcorehdr_addr so that vmcore module will - * export the elfcore header through '/proc/vmcore'. 
- */ - elfcorehdr_addr = fdh->elfcorehdr_addr; + if (!fw_dump.cpu_notes_buf_vaddr) + return; - return 0; + fadump_free_buffer(fw_dump.cpu_notes_buf_vaddr, + fw_dump.cpu_notes_buf_size); + fw_dump.cpu_notes_buf_vaddr = 0; + fw_dump.cpu_notes_buf_size = 0; } -static void free_crash_memory_ranges(void) +static void fadump_free_mem_ranges(struct fadump_mrange_info *mrange_info) { - kfree(crash_memory_ranges); - crash_memory_ranges = NULL; - crash_memory_ranges_size = 0; - max_crash_mem_ranges = 0; + kfree(mrange_info->mem_ranges); + mrange_info->mem_ranges = NULL; + mrange_info->mem_ranges_sz = 0; + mrange_info->max_mem_ranges = 0; } /* - * Allocate or reallocate crash memory ranges array in incremental units + * Allocate or reallocate mem_ranges array in incremental units * of PAGE_SIZE. */ -static int allocate_crash_memory_ranges(void) +static int fadump_alloc_mem_ranges(struct fadump_mrange_info *mrange_info) { - struct fad_crash_memory_ranges *new_array; + struct fadump_memory_range *new_array; u64 new_size; - new_size = crash_memory_ranges_size + PAGE_SIZE; - pr_debug("Allocating %llu bytes of memory for crash memory ranges\n", - new_size); + new_size = mrange_info->mem_ranges_sz + PAGE_SIZE; + pr_debug("Allocating %llu bytes of memory for %s memory ranges\n", + new_size, mrange_info->name); - new_array = krealloc(crash_memory_ranges, new_size, GFP_KERNEL); + new_array = krealloc(mrange_info->mem_ranges, new_size, GFP_KERNEL); if (new_array == NULL) { - pr_err("Insufficient memory for setting up crash memory ranges\n"); - free_crash_memory_ranges(); + pr_err("Insufficient memory for setting up %s memory ranges\n", + mrange_info->name); + fadump_free_mem_ranges(mrange_info); return -ENOMEM; } - crash_memory_ranges = new_array; - crash_memory_ranges_size = new_size; - max_crash_mem_ranges = (new_size / - sizeof(struct fad_crash_memory_ranges)); + mrange_info->mem_ranges = new_array; + mrange_info->mem_ranges_sz = new_size; + mrange_info->max_mem_ranges = (new_size / + sizeof(struct fadump_memory_range)); return 0; } -static inline int fadump_add_crash_memory(unsigned long long base, - unsigned long long end) +static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info, + u64 base, u64 end) { - u64 start, size; + struct fadump_memory_range *mem_ranges = mrange_info->mem_ranges; bool is_adjacent = false; + u64 start, size; if (base == end) return 0; @@ -1022,38 +774,41 @@ static inline int fadump_add_crash_memory(unsigned long long base, * Fold adjacent memory ranges to bring down the memory ranges/ * PT_LOAD segments count. 
*/ - if (crash_mem_ranges) { - start = crash_memory_ranges[crash_mem_ranges - 1].base; - size = crash_memory_ranges[crash_mem_ranges - 1].size; + if (mrange_info->mem_range_cnt) { + start = mem_ranges[mrange_info->mem_range_cnt - 1].base; + size = mem_ranges[mrange_info->mem_range_cnt - 1].size; if ((start + size) == base) is_adjacent = true; } if (!is_adjacent) { /* resize the array on reaching the limit */ - if (crash_mem_ranges == max_crash_mem_ranges) { + if (mrange_info->mem_range_cnt == mrange_info->max_mem_ranges) { int ret; - ret = allocate_crash_memory_ranges(); + ret = fadump_alloc_mem_ranges(mrange_info); if (ret) return ret; + + /* Update to the new resized array */ + mem_ranges = mrange_info->mem_ranges; } start = base; - crash_memory_ranges[crash_mem_ranges].base = start; - crash_mem_ranges++; + mem_ranges[mrange_info->mem_range_cnt].base = start; + mrange_info->mem_range_cnt++; } - crash_memory_ranges[crash_mem_ranges - 1].size = (end - start); - pr_debug("crash_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n", - (crash_mem_ranges - 1), start, end - 1, (end - start)); + mem_ranges[mrange_info->mem_range_cnt - 1].size = (end - start); + pr_debug("%s_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n", + mrange_info->name, (mrange_info->mem_range_cnt - 1), + start, end - 1, (end - start)); return 0; } -static int fadump_exclude_reserved_area(unsigned long long start, - unsigned long long end) +static int fadump_exclude_reserved_area(u64 start, u64 end) { - unsigned long long ra_start, ra_end; + u64 ra_start, ra_end; int ret = 0; ra_start = fw_dump.reserve_dump_area_start; @@ -1061,18 +816,22 @@ static int fadump_exclude_reserved_area(unsigned long long start, if ((ra_start < end) && (ra_end > start)) { if ((start < ra_start) && (end > ra_end)) { - ret = fadump_add_crash_memory(start, ra_start); + ret = fadump_add_mem_range(&crash_mrange_info, + start, ra_start); if (ret) return ret; - ret = fadump_add_crash_memory(ra_end, end); + ret = fadump_add_mem_range(&crash_mrange_info, + ra_end, end); } else if (start < ra_start) { - ret = fadump_add_crash_memory(start, ra_start); + ret = fadump_add_mem_range(&crash_mrange_info, + start, ra_start); } else if (ra_end < end) { - ret = fadump_add_crash_memory(ra_end, end); + ret = fadump_add_mem_range(&crash_mrange_info, + ra_end, end); } } else - ret = fadump_add_crash_memory(start, end); + ret = fadump_add_mem_range(&crash_mrange_info, start, end); return ret; } @@ -1117,36 +876,36 @@ static int fadump_init_elfcore_header(char *bufp) static int fadump_setup_crash_memory_ranges(void) { struct memblock_region *reg; - unsigned long long start, end; - int ret; + u64 start, end; + int i, ret; pr_debug("Setup crash memory ranges.\n"); - crash_mem_ranges = 0; + crash_mrange_info.mem_range_cnt = 0; /* - * add the first memory chunk (RMA_START through boot_memory_size) as - * a separate memory chunk. The reason is, at the time crash firmware - * will move the content of this memory chunk to different location - * specified during fadump registration. We need to create a separate - * program header for this chunk with the correct offset. + * Boot memory region(s) registered with firmware are moved to + * different location at the time of crash. Create separate program + * header(s) for this memory chunk(s) with the correct offset. 
*/ - ret = fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size); - if (ret) - return ret; + for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) { + start = fw_dump.boot_mem_addr[i]; + end = start + fw_dump.boot_mem_sz[i]; + ret = fadump_add_mem_range(&crash_mrange_info, start, end); + if (ret) + return ret; + } for_each_memblock(memory, reg) { - start = (unsigned long long)reg->base; - end = start + (unsigned long long)reg->size; + start = (u64)reg->base; + end = start + (u64)reg->size; /* - * skip the first memory chunk that is already added (RMA_START - * through boot_memory_size). This logic needs a relook if and - * when RMA_START changes to a non-zero value. + * skip the memory chunk that is already added + * (0 through boot_memory_top). */ - BUILD_BUG_ON(RMA_START != 0); - if (start < fw_dump.boot_memory_size) { - if (end > fw_dump.boot_memory_size) - start = fw_dump.boot_memory_size; + if (start < fw_dump.boot_mem_top) { + if (end > fw_dump.boot_mem_top) + start = fw_dump.boot_mem_top; else continue; } @@ -1167,17 +926,35 @@ static int fadump_setup_crash_memory_ranges(void) */ static inline unsigned long fadump_relocate(unsigned long paddr) { - if (paddr > RMA_START && paddr < fw_dump.boot_memory_size) - return be64_to_cpu(fdm.rmr_region.destination_address) + paddr; - else - return paddr; + unsigned long raddr, rstart, rend, rlast, hole_size; + int i; + + hole_size = 0; + rlast = 0; + raddr = paddr; + for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) { + rstart = fw_dump.boot_mem_addr[i]; + rend = rstart + fw_dump.boot_mem_sz[i]; + hole_size += (rstart - rlast); + + if (paddr >= rstart && paddr < rend) { + raddr += fw_dump.boot_mem_dest_addr - hole_size; + break; + } + + rlast = rend; + } + + pr_debug("vmcoreinfo: paddr = 0x%lx, raddr = 0x%lx\n", paddr, raddr); + return raddr; } static int fadump_create_elfcore_headers(char *bufp) { - struct elfhdr *elf; + unsigned long long raddr, offset; struct elf_phdr *phdr; - int i; + struct elfhdr *elf; + int i, j; fadump_init_elfcore_header(bufp); elf = (struct elfhdr *)bufp; @@ -1220,12 +997,14 @@ static int fadump_create_elfcore_headers(char *bufp) (elf->e_phnum)++; /* setup PT_LOAD sections. */ - - for (i = 0; i < crash_mem_ranges; i++) { - unsigned long long mbase, msize; - mbase = crash_memory_ranges[i].base; - msize = crash_memory_ranges[i].size; - + j = 0; + offset = 0; + raddr = fw_dump.boot_mem_addr[0]; + for (i = 0; i < crash_mrange_info.mem_range_cnt; i++) { + u64 mbase, msize; + + mbase = crash_mrange_info.mem_ranges[i].base; + msize = crash_mrange_info.mem_ranges[i].size; if (!msize) continue; @@ -1235,13 +1014,17 @@ static int fadump_create_elfcore_headers(char *bufp) phdr->p_flags = PF_R|PF_W|PF_X; phdr->p_offset = mbase; - if (mbase == RMA_START) { + if (mbase == raddr) { /* - * The entire RMA region will be moved by firmware - * to the specified destination_address. Hence set - * the correct offset. + * The entire real memory region will be moved by + * firmware to the specified destination_address. + * Hence set the correct offset. 
*/ - phdr->p_offset = be64_to_cpu(fdm.rmr_region.destination_address); + phdr->p_offset = fw_dump.boot_mem_dest_addr + offset; + if (j < (fw_dump.boot_mem_regs_cnt - 1)) { + offset += fw_dump.boot_mem_sz[j]; + raddr = fw_dump.boot_mem_addr[++j]; + } } phdr->p_paddr = mbase; @@ -1263,7 +1046,6 @@ static unsigned long init_fadump_header(unsigned long addr) if (!addr) return 0; - fw_dump.fadumphdr_addr = addr; fdh = __va(addr); addr += sizeof(struct fadump_crash_info_header); @@ -1271,7 +1053,7 @@ static unsigned long init_fadump_header(unsigned long addr) fdh->magic_number = FADUMP_CRASH_INFO_MAGIC; fdh->elfcorehdr_addr = addr; /* We will set the crashing cpu id in crash_fadump() during crash. */ - fdh->crashing_cpu = CPU_UNKNOWN; + fdh->crashing_cpu = FADUMP_CPU_UNKNOWN; return addr; } @@ -1293,7 +1075,8 @@ static int register_fadump(void) if (ret) return ret; - addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len); + addr = fw_dump.fadumphdr_addr; + /* Initialize fadump crash info header. */ addr = init_fadump_header(addr); vaddr = __va(addr); @@ -1302,74 +1085,27 @@ static int register_fadump(void) fadump_create_elfcore_headers(vaddr); /* register the future kernel dump with firmware. */ - return register_fw_dump(&fdm); -} - -static int fadump_unregister_dump(struct fadump_mem_struct *fdm) -{ - int rc = 0; - unsigned int wait_time; - - pr_debug("Un-register firmware-assisted dump\n"); - - /* TODO: Add upper time limit for the delay */ - do { - rc = rtas_call(fw_dump.ibm_configure_kernel_dump, 3, 1, NULL, - FADUMP_UNREGISTER, fdm, - sizeof(struct fadump_mem_struct)); - - wait_time = rtas_busy_delay_time(rc); - if (wait_time) - mdelay(wait_time); - } while (wait_time); - - if (rc) { - printk(KERN_ERR "Failed to un-register firmware-assisted dump." - " unexpected error(%d).\n", rc); - return rc; - } - fw_dump.dump_registered = 0; - return 0; -} - -static int fadump_invalidate_dump(const struct fadump_mem_struct *fdm) -{ - int rc = 0; - unsigned int wait_time; - - pr_debug("Invalidating firmware-assisted dump registration\n"); - - /* TODO: Add upper time limit for the delay */ - do { - rc = rtas_call(fw_dump.ibm_configure_kernel_dump, 3, 1, NULL, - FADUMP_INVALIDATE, fdm, - sizeof(struct fadump_mem_struct)); - - wait_time = rtas_busy_delay_time(rc); - if (wait_time) - mdelay(wait_time); - } while (wait_time); - - if (rc) { - pr_err("Failed to invalidate firmware-assisted dump registration. Unexpected error (%d).\n", rc); - return rc; - } - fw_dump.dump_active = 0; - fdm_active = NULL; - return 0; + pr_debug("Registering for firmware-assisted kernel dump...\n"); + return fw_dump.ops->fadump_register(&fw_dump); } void fadump_cleanup(void) { + if (!fw_dump.fadump_supported) + return; + /* Invalidate the registration only if dump is active. */ if (fw_dump.dump_active) { - /* pass the same memory dump structure provided by platform */ - fadump_invalidate_dump(fdm_active); + pr_debug("Invalidating firmware-assisted dump registration\n"); + fw_dump.ops->fadump_invalidate(&fw_dump); } else if (fw_dump.dump_registered) { /* Un-register Firmware-assisted dump if it was registered. 
*/ - fadump_unregister_dump(&fdm); - free_crash_memory_ranges(); + fw_dump.ops->fadump_unregister(&fw_dump); + fadump_free_mem_ranges(&crash_mrange_info); } + + if (fw_dump.ops->fadump_cleanup) + fw_dump.ops->fadump_cleanup(&fw_dump); } static void fadump_free_reserved_memory(unsigned long start_pfn, @@ -1394,90 +1130,197 @@ static void fadump_free_reserved_memory(unsigned long start_pfn, /* * Skip memory holes and free memory that was actually reserved. */ -static void fadump_release_reserved_area(unsigned long start, unsigned long end) +static void fadump_release_reserved_area(u64 start, u64 end) { + u64 tstart, tend, spfn, epfn; struct memblock_region *reg; - unsigned long tstart, tend; - unsigned long start_pfn = PHYS_PFN(start); - unsigned long end_pfn = PHYS_PFN(end); + spfn = PHYS_PFN(start); + epfn = PHYS_PFN(end); for_each_memblock(memory, reg) { - tstart = max(start_pfn, memblock_region_memory_base_pfn(reg)); - tend = min(end_pfn, memblock_region_memory_end_pfn(reg)); + tstart = max_t(u64, spfn, memblock_region_memory_base_pfn(reg)); + tend = min_t(u64, epfn, memblock_region_memory_end_pfn(reg)); if (tstart < tend) { fadump_free_reserved_memory(tstart, tend); - if (tend == end_pfn) + if (tend == epfn) break; - start_pfn = tend + 1; + spfn = tend; } } } /* - * Release the memory that was reserved in early boot to preserve the memory - * contents. The released memory will be available for general use. + * Sort the mem ranges in-place and merge adjacent ranges + * to minimize the memory ranges count. */ -static void fadump_release_memory(unsigned long begin, unsigned long end) +static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info) { - unsigned long ra_start, ra_end; + struct fadump_memory_range *mem_ranges; + struct fadump_memory_range tmp_range; + u64 base, size; + int i, j, idx; + + if (!reserved_mrange_info.mem_range_cnt) + return; + + /* Sort the memory ranges */ + mem_ranges = mrange_info->mem_ranges; + for (i = 0; i < mrange_info->mem_range_cnt; i++) { + idx = i; + for (j = (i + 1); j < mrange_info->mem_range_cnt; j++) { + if (mem_ranges[idx].base > mem_ranges[j].base) + idx = j; + } + if (idx != i) { + tmp_range = mem_ranges[idx]; + mem_ranges[idx] = mem_ranges[i]; + mem_ranges[i] = tmp_range; + } + } + + /* Merge adjacent reserved ranges */ + idx = 0; + for (i = 1; i < mrange_info->mem_range_cnt; i++) { + base = mem_ranges[i-1].base; + size = mem_ranges[i-1].size; + if (mem_ranges[i].base == (base + size)) + mem_ranges[idx].size += mem_ranges[i].size; + else { + idx++; + if (i == idx) + continue; + + mem_ranges[idx] = mem_ranges[i]; + } + } + mrange_info->mem_range_cnt = idx + 1; +} + +/* + * Scan reserved-ranges to consider them while reserving/releasing + * memory for FADump. + */ +static inline int fadump_scan_reserved_mem_ranges(void) +{ + struct device_node *root; + const __be32 *prop; + int len, ret = -1; + unsigned long i; + + root = of_find_node_by_path("/"); + if (!root) + return ret; + + prop = of_get_property(root, "reserved-ranges", &len); + if (!prop) + return ret; + + /* + * Each reserved range is an (address,size) pair, 2 cells each, + * totalling 4 cells per range. 
+ */ + for (i = 0; i < len / (sizeof(*prop) * 4); i++) { + u64 base, size; + + base = of_read_number(prop + (i * 4) + 0, 2); + size = of_read_number(prop + (i * 4) + 2, 2); + + if (size) { + ret = fadump_add_mem_range(&reserved_mrange_info, + base, base + size); + if (ret < 0) { + pr_warn("some reserved ranges are ignored!\n"); + break; + } + } + } + + return ret; +} + +/* + * Release the memory that was reserved during early boot to preserve the + * crash'ed kernel's memory contents except reserved dump area (permanent + * reservation) and reserved ranges used by F/W. The released memory will + * be available for general use. + */ +static void fadump_release_memory(u64 begin, u64 end) +{ + u64 ra_start, ra_end, tstart; + int i, ret; + + fadump_scan_reserved_mem_ranges(); ra_start = fw_dump.reserve_dump_area_start; ra_end = ra_start + fw_dump.reserve_dump_area_size; /* - * exclude the dump reserve area. Will reuse it for next - * fadump registration. + * Add reserved dump area to reserved ranges list + * and exclude all these ranges while releasing memory. */ - if (begin < ra_end && end > ra_start) { - if (begin < ra_start) - fadump_release_reserved_area(begin, ra_start); - if (end > ra_end) - fadump_release_reserved_area(ra_end, end); - } else - fadump_release_reserved_area(begin, end); + ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end); + if (ret != 0) { + /* + * Not enough memory to setup reserved ranges but the system is + * running shortage of memory. So, release all the memory except + * Reserved dump area (reused for next fadump registration). + */ + if (begin < ra_end && end > ra_start) { + if (begin < ra_start) + fadump_release_reserved_area(begin, ra_start); + if (end > ra_end) + fadump_release_reserved_area(ra_end, end); + } else + fadump_release_reserved_area(begin, end); + + return; + } + + /* Get the reserved ranges list in order first. */ + sort_and_merge_mem_ranges(&reserved_mrange_info); + + /* Exclude reserved ranges and release remaining memory */ + tstart = begin; + for (i = 0; i < reserved_mrange_info.mem_range_cnt; i++) { + ra_start = reserved_mrange_info.mem_ranges[i].base; + ra_end = ra_start + reserved_mrange_info.mem_ranges[i].size; + + if (tstart >= ra_end) + continue; + + if (tstart < ra_start) + fadump_release_reserved_area(tstart, ra_start); + tstart = ra_end; + } + + if (tstart < end) + fadump_release_reserved_area(tstart, end); } static void fadump_invalidate_release_mem(void) { - unsigned long reserved_area_start, reserved_area_end; - unsigned long destination_address; - mutex_lock(&fadump_mutex); if (!fw_dump.dump_active) { mutex_unlock(&fadump_mutex); return; } - destination_address = be64_to_cpu(fdm_active->cpu_state_data.destination_address); fadump_cleanup(); mutex_unlock(&fadump_mutex); + fadump_release_memory(fw_dump.boot_mem_top, memblock_end_of_DRAM()); + fadump_free_cpu_notes_buf(); + /* - * Save the current reserved memory bounds we will require them - * later for releasing the memory for general use. - */ - reserved_area_start = fw_dump.reserve_dump_area_start; - reserved_area_end = reserved_area_start + - fw_dump.reserve_dump_area_size; - /* - * Setup reserve_dump_area_start and its size so that we can - * reuse this reserved memory for Re-registration. + * Setup kernel metadata and initialize the kernel dump + * memory structure for FADump re-registration. 
*/ - fw_dump.reserve_dump_area_start = destination_address; - fw_dump.reserve_dump_area_size = get_fadump_area_size(); - - fadump_release_memory(reserved_area_start, reserved_area_end); - if (fw_dump.cpu_notes_buf) { - fadump_cpu_notes_buf_free( - (unsigned long)__va(fw_dump.cpu_notes_buf), - fw_dump.cpu_notes_buf_size); - fw_dump.cpu_notes_buf = 0; - fw_dump.cpu_notes_buf_size = 0; - } - /* Initialize the kernel dump memory structure for FAD registration. */ - init_fadump_mem_struct(&fdm, fw_dump.reserve_dump_area_start); + if (fw_dump.ops->fadump_setup_metadata && + (fw_dump.ops->fadump_setup_metadata(&fw_dump) < 0)) + pr_warn("Failed to setup kernel metadata!\n"); + fw_dump.ops->fadump_init_mem_struct(&fw_dump); } static ssize_t fadump_release_memory_store(struct kobject *kobj, @@ -1528,7 +1371,7 @@ static ssize_t fadump_register_store(struct kobject *kobj, int ret = 0; int input = -1; - if (!fw_dump.fadump_enabled || fdm_active) + if (!fw_dump.fadump_enabled || fw_dump.dump_active) return -EPERM; if (kstrtoint(buf, 0, &input)) @@ -1541,13 +1384,15 @@ static ssize_t fadump_register_store(struct kobject *kobj, if (fw_dump.dump_registered == 0) { goto unlock_out; } + /* Un-register Firmware-assisted dump */ - fadump_unregister_dump(&fdm); + pr_debug("Un-register firmware-assisted dump\n"); + fw_dump.ops->fadump_unregister(&fw_dump); break; case 1: if (fw_dump.dump_registered == 1) { /* Un-register Firmware-assisted dump */ - fadump_unregister_dump(&fdm); + fw_dump.ops->fadump_unregister(&fw_dump); } /* Register Firmware-assisted dump */ ret = register_fadump(); @@ -1564,62 +1409,12 @@ static ssize_t fadump_register_store(struct kobject *kobj, static int fadump_region_show(struct seq_file *m, void *private) { - const struct fadump_mem_struct *fdm_ptr; - if (!fw_dump.fadump_enabled) return 0; mutex_lock(&fadump_mutex); - if (fdm_active) - fdm_ptr = fdm_active; - else { - mutex_unlock(&fadump_mutex); - fdm_ptr = &fdm; - } - - seq_printf(m, - "CPU : [%#016llx-%#016llx] %#llx bytes, " - "Dumped: %#llx\n", - be64_to_cpu(fdm_ptr->cpu_state_data.destination_address), - be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) + - be64_to_cpu(fdm_ptr->cpu_state_data.source_len) - 1, - be64_to_cpu(fdm_ptr->cpu_state_data.source_len), - be64_to_cpu(fdm_ptr->cpu_state_data.bytes_dumped)); - seq_printf(m, - "HPTE: [%#016llx-%#016llx] %#llx bytes, " - "Dumped: %#llx\n", - be64_to_cpu(fdm_ptr->hpte_region.destination_address), - be64_to_cpu(fdm_ptr->hpte_region.destination_address) + - be64_to_cpu(fdm_ptr->hpte_region.source_len) - 1, - be64_to_cpu(fdm_ptr->hpte_region.source_len), - be64_to_cpu(fdm_ptr->hpte_region.bytes_dumped)); - seq_printf(m, - "DUMP: [%#016llx-%#016llx] %#llx bytes, " - "Dumped: %#llx\n", - be64_to_cpu(fdm_ptr->rmr_region.destination_address), - be64_to_cpu(fdm_ptr->rmr_region.destination_address) + - be64_to_cpu(fdm_ptr->rmr_region.source_len) - 1, - be64_to_cpu(fdm_ptr->rmr_region.source_len), - be64_to_cpu(fdm_ptr->rmr_region.bytes_dumped)); - - if (!fdm_active || - (fw_dump.reserve_dump_area_start == - be64_to_cpu(fdm_ptr->cpu_state_data.destination_address))) - goto out; - - /* Dump is active. Show reserved memory region. 
*/ - seq_printf(m, - " : [%#016llx-%#016llx] %#llx bytes, " - "Dumped: %#llx\n", - (unsigned long long)fw_dump.reserve_dump_area_start, - be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) - 1, - be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) - - fw_dump.reserve_dump_area_start, - be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) - - fw_dump.reserve_dump_area_start); -out: - if (fdm_active) - mutex_unlock(&fadump_mutex); + fw_dump.ops->fadump_region_show(&fw_dump, m); + mutex_unlock(&fadump_mutex); return 0; } @@ -1690,14 +1485,77 @@ int __init setup_fadump(void) * if dump process fails then invalidate the registration * and release memory before proceeding for re-registration. */ - if (process_fadump(fdm_active) < 0) + if (fw_dump.ops->fadump_process(&fw_dump) < 0) fadump_invalidate_release_mem(); } /* Initialize the kernel dump memory structure for FAD registration. */ else if (fw_dump.reserve_dump_area_size) - init_fadump_mem_struct(&fdm, fw_dump.reserve_dump_area_start); + fw_dump.ops->fadump_init_mem_struct(&fw_dump); + fadump_init_files(); return 1; } subsys_initcall(setup_fadump); +#else /* !CONFIG_PRESERVE_FA_DUMP */ + +/* Scan the Firmware Assisted dump configuration details. */ +int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname, + int depth, void *data) +{ + if ((depth != 1) || (strcmp(uname, "ibm,opal") != 0)) + return 0; + + opal_fadump_dt_scan(&fw_dump, node); + return 1; +} + +/* + * When dump is active but PRESERVE_FA_DUMP is enabled on the kernel, + * preserve crash data. The subsequent memory preserving kernel boot + * is likely to process this crash data. + */ +int __init fadump_reserve_mem(void) +{ + if (fw_dump.dump_active) { + /* + * If last boot has crashed then reserve all the memory + * above boot memory to preserve crash data. 
+ */ + pr_info("Preserving crash data for processing in next boot.\n"); + fadump_reserve_crash_area(fw_dump.boot_mem_top); + } else + pr_debug("FADump-aware kernel..\n"); + + return 1; +} +#endif /* CONFIG_PRESERVE_FA_DUMP */ + +/* Preserve everything above the base address */ +static void __init fadump_reserve_crash_area(u64 base) +{ + struct memblock_region *reg; + u64 mstart, msize; + + for_each_memblock(memory, reg) { + mstart = reg->base; + msize = reg->size; + + if ((mstart + msize) < base) + continue; + + if (mstart < base) { + msize -= (base - mstart); + mstart = base; + } + + pr_info("Reserving %lluMB of memory at %#016llx for preserving crash data", + (msize >> 20), mstart); + memblock_reserve(mstart, msize); + } +} + +unsigned long __init arch_reserved_kernel_pages(void) +{ + return memblock_reserved_size() / PAGE_SIZE; +} diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index f255e22184b48b..4a24f8f026c797 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -34,7 +34,16 @@ #include "head_32.h" -/* 601 only have IBAT; cr0.eq is set on 601 when using this macro */ +/* 601 only have IBAT */ +#ifdef CONFIG_PPC_BOOK3S_601 +#define LOAD_BAT(n, reg, RA, RB) \ + li RA,0; \ + mtspr SPRN_IBAT##n##U,RA; \ + lwz RA,(n*16)+0(reg); \ + lwz RB,(n*16)+4(reg); \ + mtspr SPRN_IBAT##n##U,RA; \ + mtspr SPRN_IBAT##n##L,RB +#else #define LOAD_BAT(n, reg, RA, RB) \ /* see the comment for clear_bats() -- Cort */ \ li RA,0; \ @@ -44,12 +53,11 @@ lwz RB,(n*16)+4(reg); \ mtspr SPRN_IBAT##n##U,RA; \ mtspr SPRN_IBAT##n##L,RB; \ - beq 1f; \ lwz RA,(n*16)+8(reg); \ lwz RB,(n*16)+12(reg); \ mtspr SPRN_DBAT##n##U,RA; \ - mtspr SPRN_DBAT##n##L,RB; \ -1: + mtspr SPRN_DBAT##n##L,RB +#endif __HEAD .stabs "arch/powerpc/kernel/",N_SO,0,0,0f @@ -557,9 +565,9 @@ DataStoreTLBMiss: cmplw 0,r1,r3 mfspr r2, SPRN_SPRG_PGDIR #ifdef CONFIG_SWAP - li r1, _PAGE_RW | _PAGE_PRESENT | _PAGE_ACCESSED + li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED #else - li r1, _PAGE_RW | _PAGE_PRESENT + li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT #endif bge- 112f lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ @@ -820,9 +828,6 @@ load_up_mmu: /* Load the BAT registers with the values set up by MMU_init. MMU_init takes care of whether we're on a 601 or not. */ - mfpvr r3 - srwi r3,r3,16 - cmpwi r3,1 lis r3,BATS@ha addi r3,r3,BATS@l tophys(r3,r3) @@ -897,9 +902,11 @@ start_here: bl machine_init bl __save_cpu_setup bl MMU_init +#ifdef CONFIG_KASAN BEGIN_MMU_FTR_SECTION bl MMU_init_hw_patch END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) +#endif /* * Go back to running unmapped so we can load up new values @@ -996,11 +1003,8 @@ EXPORT_SYMBOL(switch_mmu_context) */ clear_bats: li r10,0 - mfspr r9,SPRN_PVR - rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ - cmpwi r9, 1 - beq 1f +#ifndef CONFIG_PPC_BOOK3S_601 mtspr SPRN_DBAT0U,r10 mtspr SPRN_DBAT0L,r10 mtspr SPRN_DBAT1U,r10 @@ -1009,7 +1013,7 @@ clear_bats: mtspr SPRN_DBAT2L,r10 mtspr SPRN_DBAT3U,r10 mtspr SPRN_DBAT3L,r10 -1: +#endif mtspr SPRN_IBAT0U,r10 mtspr SPRN_IBAT0L,r10 mtspr SPRN_IBAT1U,r10 @@ -1104,10 +1108,7 @@ mmu_off: */ initial_bats: lis r11,PAGE_OFFSET@h - mfspr r9,SPRN_PVR - rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ - cmpwi 0,r9,1 - bne 4f +#ifdef CONFIG_PPC_BOOK3S_601 ori r11,r11,4 /* set up BAT registers for 601 */ li r8,0x7f /* valid, block length = 8MB */ mtspr SPRN_IBAT0U,r11 /* N.B. 
601 has valid bit in */ @@ -1120,10 +1121,8 @@ initial_bats: addis r8,r8,0x800000@h mtspr SPRN_IBAT2U,r11 mtspr SPRN_IBAT2L,r8 - isync - blr - -4: tophys(r8,r11) +#else + tophys(r8,r11) #ifdef CONFIG_SMP ori r8,r8,0x12 /* R/W access, M=1 */ #else @@ -1135,10 +1134,10 @@ initial_bats: mtspr SPRN_DBAT0U,r11 /* bit in upper BAT register */ mtspr SPRN_IBAT0L,r8 mtspr SPRN_IBAT0U,r11 +#endif isync blr - #ifdef CONFIG_BOOTX_TEXT setup_disp_bat: /* @@ -1153,15 +1152,13 @@ setup_disp_bat: beqlr lwz r11,0(r8) lwz r8,4(r8) - mfspr r9,SPRN_PVR - rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ - cmpwi 0,r9,1 - beq 1f +#ifndef CONFIG_PPC_BOOK3S_601 mtspr SPRN_DBAT3L,r8 mtspr SPRN_DBAT3U,r11 - blr -1: mtspr SPRN_IBAT3L,r8 +#else + mtspr SPRN_IBAT3L,r8 mtspr SPRN_IBAT3U,r11 +#endif blr #endif /* CONFIG_BOOTX_TEXT */ diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h index 4a692553651f0c..8abc7783dbe5a7 100644 --- a/arch/powerpc/kernel/head_32.h +++ b/arch/powerpc/kernel/head_32.h @@ -4,19 +4,6 @@ #include /* for STACK_FRAME_REGS_MARKER */ -/* - * MSR_KERNEL is > 0x8000 on 4xx/Book-E since it include MSR_CE. - */ -.macro __LOAD_MSR_KERNEL r, x -.if \x >= 0x8000 - lis \r, (\x)@h - ori \r, \r, (\x)@l -.else - li \r, (\x) -.endif -.endm -#define LOAD_MSR_KERNEL(r, x) __LOAD_MSR_KERNEL r, x - /* * Exception entry code. This code runs with address translation * turned off, i.e. using physical addresses. @@ -92,7 +79,7 @@ #ifdef CONFIG_40x rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */ #else - LOAD_MSR_KERNEL(r10, MSR_KERNEL & ~(MSR_IR|MSR_DR)) /* can take exceptions */ + LOAD_REG_IMMEDIATE(r10, MSR_KERNEL & ~(MSR_IR|MSR_DR)) /* can take exceptions */ MTMSRD(r10) /* (except for mach check in rtas) */ #endif lis r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */ @@ -140,10 +127,10 @@ * otherwise we might risk taking an interrupt before we tell lockdep * they are enabled. */ - LOAD_MSR_KERNEL(r10, MSR_KERNEL) + LOAD_REG_IMMEDIATE(r10, MSR_KERNEL) rlwimi r10, r9, 0, MSR_EE #else - LOAD_MSR_KERNEL(r10, MSR_KERNEL | MSR_EE) + LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE) #endif #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS) mtspr SPRN_NRI, r0 @@ -187,7 +174,7 @@ #define EXC_XFER_TEMPLATE(hdlr, trap, msr, tfer, ret) \ li r10,trap; \ stw r10,_TRAP(r11); \ - LOAD_MSR_KERNEL(r10, msr); \ + LOAD_REG_IMMEDIATE(r10, msr); \ bl tfer; \ .long hdlr; \ .long ret diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 91d297e696dd99..ad79fddb974dee 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -182,7 +182,8 @@ __secondary_hold: isync bctr #else - BUG_OPCODE +0: trap + EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0 #endif CLOSE_FIXED_SECTION(first_256B) @@ -635,7 +636,7 @@ __after_prom_start: sub r5,r5,r11 #else /* just copy interrupts */ - LOAD_REG_IMMEDIATE(r5, FIXED_SYMBOL_ABS_ADDR(__end_interrupts)) + LOAD_REG_IMMEDIATE_SYM(r5, r11, FIXED_SYMBOL_ABS_ADDR(__end_interrupts)) #endif b 5f 3: @@ -998,7 +999,8 @@ start_here_common: bl start_kernel /* Not reached */ - BUG_OPCODE + trap + EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0 /* * We put a few things here that have to be page-aligned. 
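The head_32.S hunks above drop the runtime PVR probe for the 601 (mfspr from SPRN_PVR followed by a compare against 1) in favour of CONFIG_PPC_BOOK3S_601 preprocessor guards. As a rough C-level illustration of that trade-off, and not part of the patch itself (both helper names below are made up), the same decision can be expressed either as a boot-time probe or as a build-time constant:

#include <linux/types.h>
#include <asm/reg.h>

/* Old style: probe the PVR at runtime, so one binary can cover several CPUs. */
static bool cpu_is_601_runtime(void)
{
        return PVR_VER(mfspr(SPRN_PVR)) == 1;
}

/* New style: fixed when the kernel is configured, so the compiler can
 * discard the unused 601 or non-601 code path entirely. */
static bool cpu_is_601_buildtime(void)
{
        return IS_ENABLED(CONFIG_PPC_BOOK3S_601);
}

The cost is that a kernel built with CONFIG_PPC_BOOK3S_601 only runs on a 601; the benefit is shorter, branch-free fast paths in the low-level assembly above.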
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 5ab9178c234736..19f583e18402e2 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -15,6 +15,7 @@ */ #include +#include #include #include #include @@ -574,8 +575,6 @@ InstructionBreakpoint: * by decoding the registers used by the dcbx instruction and adding them. * DAR is set to the calculated address. */ - /* define if you don't want to use self modifying code */ -#define NO_SELF_MODIFYING_CODE FixupDAR:/* Entry point for dcbx workaround. */ mtspr SPRN_M_TW, r10 /* fetch instruction from memory. */ @@ -639,27 +638,6 @@ FixupDAR:/* Entry point for dcbx workaround. */ rlwinm r10, r10,0,7,5 /* Clear store bit for buggy dcbst insn */ mtspr SPRN_DSISR, r10 142: /* continue, it was a dcbx, dcbi instruction. */ -#ifndef NO_SELF_MODIFYING_CODE - andis. r10,r11,0x1f /* test if reg RA is r0 */ - li r10,modified_instr@l - dcbtst r0,r10 /* touch for store */ - rlwinm r11,r11,0,0,20 /* Zero lower 10 bits */ - oris r11,r11,640 /* Transform instr. to a "add r10,RA,RB" */ - ori r11,r11,532 - stw r11,0(r10) /* store add/and instruction */ - dcbf 0,r10 /* flush new instr. to memory. */ - icbi 0,r10 /* invalidate instr. cache line */ - mfspr r11, SPRN_SPRG_SCRATCH1 /* restore r11 */ - mfspr r10, SPRN_SPRG_SCRATCH0 /* restore r10 */ - isync /* Wait until new instr is loaded from memory */ -modified_instr: - .space 4 /* this is where the add instr. is stored */ - bne+ 143f - subf r10,r0,r10 /* r10=r10-r0, only if reg RA is r0 */ -143: mtdar r10 /* store faulting EA in DAR */ - mfspr r10,SPRN_M_TW - b DARFixed /* Go back to normal TLB handling */ -#else mfctr r10 mtdar r10 /* save ctr reg in DAR */ rlwinm r10, r11, 24, 24, 28 /* offset into jump table for reg RB */ @@ -723,7 +701,6 @@ modified_instr: add r10, r10, r11 /* add it */ mfctr r11 /* restore r11 */ b 151b -#endif /* * This is where the main kernel code starts. @@ -741,6 +718,9 @@ start_here: /* stack */ lis r1,init_thread_union@ha addi r1,r1,init_thread_union@l + lis r0, STACK_END_MAGIC@h + ori r0, r0, STACK_END_MAGIC@l + stw r0, 0(r1) li r0,0 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index c8d1fa2e9d535c..1007ec36b4cb91 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -195,18 +195,63 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs) tsk->thread.last_hit_ubp = NULL; } +static bool is_larx_stcx_instr(struct pt_regs *regs, unsigned int instr) +{ + int ret, type; + struct instruction_op op; + + ret = analyse_instr(&op, regs, instr); + type = GETTYPE(op.type); + return (!ret && (type == LARX || type == STCX)); +} + /* * Handle debug exception notifications. */ +static bool stepping_handler(struct pt_regs *regs, struct perf_event *bp, + unsigned long addr) +{ + unsigned int instr = 0; + + if (__get_user_inatomic(instr, (unsigned int *)regs->nip)) + goto fail; + + if (is_larx_stcx_instr(regs, instr)) { + printk_ratelimited("Breakpoint hit on instruction that can't be emulated." + " Breakpoint at 0x%lx will be disabled.\n", addr); + goto disable; + } + + /* Do not emulate user-space instructions, instead single-step them */ + if (user_mode(regs)) { + current->thread.last_hit_ubp = bp; + regs->msr |= MSR_SE; + return false; + } + + if (!emulate_step(regs, instr)) + goto fail; + + return true; + +fail: + /* + * We've failed in reliably handling the hw-breakpoint. 
Unregister + * it and throw a warning message to let the user know about it. + */ + WARN(1, "Unable to handle hardware breakpoint. Breakpoint at " + "0x%lx will be disabled.", addr); + +disable: + perf_event_disable_inatomic(bp); + return false; +} + int hw_breakpoint_handler(struct die_args *args) { int rc = NOTIFY_STOP; struct perf_event *bp; struct pt_regs *regs = args->regs; -#ifndef CONFIG_PPC_8xx - int stepped = 1; - unsigned int instr; -#endif struct arch_hw_breakpoint *info; unsigned long dar = regs->dar; @@ -251,31 +296,9 @@ int hw_breakpoint_handler(struct die_args *args) (dar - bp->attr.bp_addr < bp->attr.bp_len))) info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ; -#ifndef CONFIG_PPC_8xx - /* Do not emulate user-space instructions, instead single-step them */ - if (user_mode(regs)) { - current->thread.last_hit_ubp = bp; - regs->msr |= MSR_SE; + if (!IS_ENABLED(CONFIG_PPC_8xx) && !stepping_handler(regs, bp, info->address)) goto out; - } - - stepped = 0; - instr = 0; - if (!__get_user_inatomic(instr, (unsigned int *) regs->nip)) - stepped = emulate_step(regs, instr); - /* - * emulate_step() could not execute it. We've failed in reliably - * handling the hw-breakpoint. Unregister it and throw a warning - * message to let the user know about it. - */ - if (!stepped) { - WARN(1, "Unable to handle hardware breakpoint. Breakpoint at " - "0x%lx will be disabled.", info->address); - perf_event_disable_inatomic(bp); - goto out; - } -#endif /* * As a policy, the callback is invoked in a 'trigger-after-execute' * fashion diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c index fbd2d0007c52cb..0276bc8c8969d8 100644 --- a/arch/powerpc/kernel/io-workarounds.c +++ b/arch/powerpc/kernel/io-workarounds.c @@ -149,8 +149,8 @@ static const struct ppc_pci_io iowa_pci_io = { }; #ifdef CONFIG_PPC_INDIRECT_MMIO -static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size, - pgprot_t prot, void *caller) +void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size, + pgprot_t prot, void *caller) { struct iowa_bus *bus; void __iomem *res = __ioremap_caller(addr, size, prot, caller); @@ -163,20 +163,17 @@ static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size, } return res; } -#else /* CONFIG_PPC_INDIRECT_MMIO */ -#define iowa_ioremap NULL #endif /* !CONFIG_PPC_INDIRECT_MMIO */ +bool io_workaround_inited; + /* Enable IO workaround */ static void io_workaround_init(void) { - static int io_workaround_inited; - if (io_workaround_inited) return; ppc_pci_io = iowa_pci_io; - ppc_md.ioremap = iowa_ioremap; - io_workaround_inited = 1; + io_workaround_inited = true; } /* Register new bus to support workaround */ diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 0a67ce9f827eb0..9704f3f76e63ea 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -633,11 +633,54 @@ static void iommu_table_clear(struct iommu_table *tbl) #endif } +static void iommu_table_reserve_pages(struct iommu_table *tbl, + unsigned long res_start, unsigned long res_end) +{ + int i; + + WARN_ON_ONCE(res_end < res_start); + /* + * Reserve page 0 so it will not be used for any mappings. + * This avoids buggy drivers that consider page 0 to be invalid + * to crash the machine or even lose data. 
+ */ + if (tbl->it_offset == 0) + set_bit(0, tbl->it_map); + + tbl->it_reserved_start = res_start; + tbl->it_reserved_end = res_end; + + /* Check if res_start..res_end isn't empty and overlaps the table */ + if (res_start && res_end && + (tbl->it_offset + tbl->it_size < res_start || + res_end < tbl->it_offset)) + return; + + for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i) + set_bit(i - tbl->it_offset, tbl->it_map); +} + +static void iommu_table_release_pages(struct iommu_table *tbl) +{ + int i; + + /* + * In case we have reserved the first bit, we should not emit + * the warning below. + */ + if (tbl->it_offset == 0) + clear_bit(0, tbl->it_map); + + for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i) + clear_bit(i - tbl->it_offset, tbl->it_map); +} + /* * Build a iommu_table structure. This contains a bit map which * is used to manage allocation of the tce space. */ -struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid) +struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid, + unsigned long res_start, unsigned long res_end) { unsigned long sz; static int welcomed = 0; @@ -656,13 +699,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid) tbl->it_map = page_address(page); memset(tbl->it_map, 0, sz); - /* - * Reserve page 0 so it will not be used for any mappings. - * This avoids buggy drivers that consider page 0 to be invalid - * to crash the machine or even lose data. - */ - if (tbl->it_offset == 0) - set_bit(0, tbl->it_map); + iommu_table_reserve_pages(tbl, res_start, res_end); /* We only split the IOMMU table if we have 1GB or more of space */ if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024)) @@ -714,12 +751,7 @@ static void iommu_table_free(struct kref *kref) return; } - /* - * In case we have reserved the first bit, we should not emit - * the warning below. - */ - if (tbl->it_offset == 0) - clear_bit(0, tbl->it_map); + iommu_table_release_pages(tbl); /* verify that table contains no entries */ if (!bitmap_empty(tbl->it_map, tbl->it_size)) @@ -981,29 +1013,32 @@ int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa) } EXPORT_SYMBOL_GPL(iommu_tce_check_gpa); -long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl, +extern long iommu_tce_xchg_no_kill(struct mm_struct *mm, + struct iommu_table *tbl, unsigned long entry, unsigned long *hpa, enum dma_data_direction *direction) { long ret; unsigned long size = 0; - ret = tbl->it_ops->exchange(tbl, entry, hpa, direction); - + ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, false); if (!ret && ((*direction == DMA_FROM_DEVICE) || (*direction == DMA_BIDIRECTIONAL)) && !mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift, &size)) SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT)); - /* if (unlikely(ret)) - pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n", - __func__, hwaddr, entry << tbl->it_page_shift, - hwaddr, ret); */ - return ret; } -EXPORT_SYMBOL_GPL(iommu_tce_xchg); +EXPORT_SYMBOL_GPL(iommu_tce_xchg_no_kill); + +void iommu_tce_kill(struct iommu_table *tbl, + unsigned long entry, unsigned long pages) +{ + if (tbl->it_ops->tce_kill) + tbl->it_ops->tce_kill(tbl, entry, pages, false); +} +EXPORT_SYMBOL_GPL(iommu_tce_kill); int iommu_take_ownership(struct iommu_table *tbl) { @@ -1017,22 +1052,21 @@ int iommu_take_ownership(struct iommu_table *tbl) * requires exchange() callback defined so if it is not * implemented, we disallow taking ownership over the table. 
*/ - if (!tbl->it_ops->exchange) + if (!tbl->it_ops->xchg_no_kill) return -EINVAL; spin_lock_irqsave(&tbl->large_pool.lock, flags); for (i = 0; i < tbl->nr_pools; i++) spin_lock(&tbl->pools[i].lock); - if (tbl->it_offset == 0) - clear_bit(0, tbl->it_map); + iommu_table_release_pages(tbl); if (!bitmap_empty(tbl->it_map, tbl->it_size)) { pr_err("iommu_tce: it_map is not empty"); ret = -EBUSY; - /* Restore bit#0 set by iommu_init_table() */ - if (tbl->it_offset == 0) - set_bit(0, tbl->it_map); + /* Undo iommu_table_release_pages, i.e. restore bit#0, etc */ + iommu_table_reserve_pages(tbl, tbl->it_reserved_start, + tbl->it_reserved_end); } else { memset(tbl->it_map, 0xff, sz); } @@ -1055,9 +1089,8 @@ void iommu_release_ownership(struct iommu_table *tbl) memset(tbl->it_map, 0, sz); - /* Restore bit#0 set by iommu_init_table() */ - if (tbl->it_offset == 0) - set_bit(0, tbl->it_map); + iommu_table_reserve_pages(tbl, tbl->it_reserved_start, + tbl->it_reserved_end); for (i = 0; i < tbl->nr_pools; i++) spin_unlock(&tbl->pools[i].lock); diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index b7b3a5e4e22414..617eba82531cb9 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c @@ -64,16 +64,17 @@ #define KVM_INST_MTSRIN 0x7c0001e4 static bool kvm_patching_worked = true; -char kvm_tmp[1024 * 1024]; +extern char kvm_tmp[]; +extern char kvm_tmp_end[]; static int kvm_tmp_index; -static inline void kvm_patch_ins(u32 *inst, u32 new_inst) +static void __init kvm_patch_ins(u32 *inst, u32 new_inst) { *inst = new_inst; flush_icache_range((ulong)inst, (ulong)inst + 4); } -static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt) +static void __init kvm_patch_ins_ll(u32 *inst, long addr, u32 rt) { #ifdef CONFIG_64BIT kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc)); @@ -82,7 +83,7 @@ static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt) #endif } -static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt) +static void __init kvm_patch_ins_ld(u32 *inst, long addr, u32 rt) { #ifdef CONFIG_64BIT kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc)); @@ -91,12 +92,12 @@ static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt) #endif } -static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt) +static void __init kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt) { kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff)); } -static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt) +static void __init kvm_patch_ins_std(u32 *inst, long addr, u32 rt) { #ifdef CONFIG_64BIT kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc)); @@ -105,17 +106,17 @@ static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt) #endif } -static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt) +static void __init kvm_patch_ins_stw(u32 *inst, long addr, u32 rt) { kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc)); } -static void kvm_patch_ins_nop(u32 *inst) +static void __init kvm_patch_ins_nop(u32 *inst) { kvm_patch_ins(inst, KVM_INST_NOP); } -static void kvm_patch_ins_b(u32 *inst, int addr) +static void __init kvm_patch_ins_b(u32 *inst, int addr) { #if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S) /* On relocatable kernels interrupts handlers and our code @@ -128,11 +129,11 @@ static void kvm_patch_ins_b(u32 *inst, int addr) kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK)); } -static u32 *kvm_alloc(int len) +static u32 * __init kvm_alloc(int len) { u32 *p; - if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) { + if ((kvm_tmp_index + len) > 
(kvm_tmp_end - kvm_tmp)) { printk(KERN_ERR "KVM: No more space (%d + %d)\n", kvm_tmp_index, len); kvm_patching_worked = false; @@ -151,7 +152,7 @@ extern u32 kvm_emulate_mtmsrd_orig_ins_offs; extern u32 kvm_emulate_mtmsrd_len; extern u32 kvm_emulate_mtmsrd[]; -static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt) +static void __init kvm_patch_ins_mtmsrd(u32 *inst, u32 rt) { u32 *p; int distance_start; @@ -204,7 +205,7 @@ extern u32 kvm_emulate_mtmsr_orig_ins_offs; extern u32 kvm_emulate_mtmsr_len; extern u32 kvm_emulate_mtmsr[]; -static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt) +static void __init kvm_patch_ins_mtmsr(u32 *inst, u32 rt) { u32 *p; int distance_start; @@ -265,7 +266,7 @@ extern u32 kvm_emulate_wrtee_orig_ins_offs; extern u32 kvm_emulate_wrtee_len; extern u32 kvm_emulate_wrtee[]; -static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one) +static void __init kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one) { u32 *p; int distance_start; @@ -322,7 +323,7 @@ extern u32 kvm_emulate_wrteei_0_branch_offs; extern u32 kvm_emulate_wrteei_0_len; extern u32 kvm_emulate_wrteei_0[]; -static void kvm_patch_ins_wrteei_0(u32 *inst) +static void __init kvm_patch_ins_wrteei_0(u32 *inst) { u32 *p; int distance_start; @@ -363,7 +364,7 @@ extern u32 kvm_emulate_mtsrin_orig_ins_offs; extern u32 kvm_emulate_mtsrin_len; extern u32 kvm_emulate_mtsrin[]; -static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb) +static void __init kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb) { u32 *p; int distance_start; @@ -399,7 +400,7 @@ static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb) #endif -static void kvm_map_magic_page(void *data) +static void __init kvm_map_magic_page(void *data) { u32 *features = data; @@ -414,7 +415,7 @@ static void kvm_map_magic_page(void *data) *features = out[0]; } -static void kvm_check_ins(u32 *inst, u32 features) +static void __init kvm_check_ins(u32 *inst, u32 features) { u32 _inst = *inst; u32 inst_no_rt = _inst & ~KVM_MASK_RT; @@ -658,7 +659,7 @@ static void kvm_check_ins(u32 *inst, u32 features) extern u32 kvm_template_start[]; extern u32 kvm_template_end[]; -static void kvm_use_magic_page(void) +static void __init kvm_use_magic_page(void) { u32 *p; u32 *start, *end; @@ -699,25 +700,13 @@ static void kvm_use_magic_page(void) kvm_patching_worked ? "worked" : "failed"); } -static __init void kvm_free_tmp(void) -{ - /* - * Inform kmemleak about the hole in the .bss section since the - * corresponding pages will be unmapped with DEBUG_PAGEALLOC=y. 
- */ - kmemleak_free_part(&kvm_tmp[kvm_tmp_index], - ARRAY_SIZE(kvm_tmp) - kvm_tmp_index); - free_reserved_area(&kvm_tmp[kvm_tmp_index], - &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL); -} - static int __init kvm_guest_init(void) { if (!kvm_para_available()) - goto free_tmp; + return 0; if (!epapr_paravirt_enabled) - goto free_tmp; + return 0; if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE)) kvm_use_magic_page(); @@ -727,9 +716,6 @@ static int __init kvm_guest_init(void) powersave_nap = 1; #endif -free_tmp: - kvm_free_tmp(); - return 0; } diff --git a/arch/powerpc/kernel/kvm_emul.S b/arch/powerpc/kernel/kvm_emul.S index eb2568f583aedd..7af6f8b50c5d63 100644 --- a/arch/powerpc/kernel/kvm_emul.S +++ b/arch/powerpc/kernel/kvm_emul.S @@ -192,6 +192,8 @@ kvm_emulate_mtmsr_orig_ins_offs: kvm_emulate_mtmsr_len: .long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4 +#ifdef CONFIG_BOOKE + /* also used for wrteei 1 */ .global kvm_emulate_wrtee kvm_emulate_wrtee: @@ -285,6 +287,10 @@ kvm_emulate_wrteei_0_branch_offs: kvm_emulate_wrteei_0_len: .long (kvm_emulate_wrteei_0_end - kvm_emulate_wrteei_0) / 4 +#endif /* CONFIG_BOOKE */ + +#ifdef CONFIG_PPC_BOOK3S_32 + .global kvm_emulate_mtsrin kvm_emulate_mtsrin: @@ -334,5 +340,15 @@ kvm_emulate_mtsrin_orig_ins_offs: kvm_emulate_mtsrin_len: .long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4 +#endif /* CONFIG_PPC_BOOK3S_32 */ + + .balign 4 + .global kvm_tmp +kvm_tmp: + .space (64 * 1024) + +.global kvm_tmp_end +kvm_tmp_end: + .global kvm_template_end kvm_template_end: diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index 18481b0e2788a4..04a7cba58eff4f 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -29,6 +29,8 @@ #include #include #include +#include +#include int default_machine_kexec_prepare(struct kimage *image) { @@ -327,6 +329,13 @@ void default_machine_kexec(struct kimage *image) #ifdef CONFIG_PPC_PSERIES kexec_paca.lppaca_ptr = NULL; #endif + + if (is_secure_guest() && !(image->preserve_context || + image->type == KEXEC_TYPE_CRASH)) { + uv_unshare_all_pages(); + printk("kexec: Unshared all shared pages.\n"); + } + paca_ptrs[kexec_paca.paca_index] = &kexec_paca; setup_paca(&kexec_paca); diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c index b18df633eae930..34c1001e9e8bf1 100644 --- a/arch/powerpc/kernel/mce.c +++ b/arch/powerpc/kernel/mce.c @@ -33,13 +33,18 @@ static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_ue_event_queue); static void machine_check_process_queued_event(struct irq_work *work); -void machine_check_ue_event(struct machine_check_event *evt); +static void machine_check_ue_irq_work(struct irq_work *work); +static void machine_check_ue_event(struct machine_check_event *evt); static void machine_process_ue_event(struct work_struct *work); static struct irq_work mce_event_process_work = { .func = machine_check_process_queued_event, }; +static struct irq_work mce_ue_event_irq_work = { + .func = machine_check_ue_irq_work, +}; + DECLARE_WORK(mce_ue_event_work, machine_process_ue_event); static void mce_set_error_info(struct machine_check_event *mce, @@ -144,6 +149,7 @@ void save_mce_event(struct pt_regs *regs, long handled, if (phys_addr != ULONG_MAX) { mce->u.ue_error.physical_address_provided = true; mce->u.ue_error.physical_address = phys_addr; + mce->u.ue_error.ignore_event = mce_err->ignore_event; machine_check_ue_event(mce); } } @@ -199,11 +205,15 @@ void release_mce_event(void) get_mce_event(NULL, true); } 
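The mce.c change that follows defers UE event processing in two stages: the machine check handler runs in a context where calling schedule_work() directly is not safe, so it only queues an irq_work, and the irq_work callback is what schedules the workqueue item. The snippet below is just a generic sketch of that pattern (all names are hypothetical and it is not taken from the patch):

#include <linux/irq_work.h>
#include <linux/workqueue.h>

/* Stage 2: process context, may sleep and take mutexes. */
static void example_work_fn(struct work_struct *work)
{
}
static DECLARE_WORK(example_work, example_work_fn);

/* Stage 1: ordinary interrupt context, a safe place to kick the workqueue. */
static void example_irq_work_fn(struct irq_work *work)
{
        schedule_work(&example_work);
}
static struct irq_work example_irq_work = {
        .func = example_irq_work_fn,
};

/* Called from the NMI-like handler: only the lock-free irq_work_queue(). */
static void example_event(void)
{
        irq_work_queue(&example_irq_work);
}

In the hunk itself, machine_check_ue_event() now calls irq_work_queue(&mce_ue_event_irq_work) instead of schedule_work(), and the new machine_check_ue_irq_work() callback performs the schedule_work(&mce_ue_event_work).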
+static void machine_check_ue_irq_work(struct irq_work *work) +{ + schedule_work(&mce_ue_event_work); +} /* * Queue up the MCE event which then can be handled later. */ -void machine_check_ue_event(struct machine_check_event *evt) +static void machine_check_ue_event(struct machine_check_event *evt) { int index; @@ -216,7 +226,7 @@ void machine_check_ue_event(struct machine_check_event *evt) memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt)); /* Queue work to process this event later. */ - schedule_work(&mce_ue_event_work); + irq_work_queue(&mce_ue_event_irq_work); } /* @@ -257,8 +267,17 @@ static void machine_process_ue_event(struct work_struct *work) /* * This should probably queued elsewhere, but * oh! well + * + * Don't report this machine check because the caller has a + * asked us to ignore the event, it has a fixup handler which + * will do the appropriate error handling and reporting. */ if (evt->error_type == MCE_ERROR_TYPE_UE) { + if (evt->u.ue_error.ignore_event) { + __this_cpu_dec(mce_ue_count); + continue; + } + if (evt->u.ue_error.physical_address_provided) { unsigned long pfn; @@ -292,6 +311,12 @@ static void machine_check_process_queued_event(struct irq_work *work) while (__this_cpu_read(mce_queue_count) > 0) { index = __this_cpu_read(mce_queue_count) - 1; evt = this_cpu_ptr(&mce_event_queue[index]); + + if (evt->error_type == MCE_ERROR_TYPE_UE && + evt->u.ue_error.ignore_event) { + __this_cpu_dec(mce_queue_count); + continue; + } machine_check_print_event_info(evt, false, false); __this_cpu_dec(mce_queue_count); } @@ -300,7 +325,7 @@ static void machine_check_process_queued_event(struct irq_work *work) void machine_check_print_event_info(struct machine_check_event *evt, bool user_mode, bool in_guest) { - const char *level, *sevstr, *subtype, *err_type; + const char *level, *sevstr, *subtype, *err_type, *initiator; uint64_t ea = 0, pa = 0; int n = 0; char dar_str[50]; @@ -385,6 +410,28 @@ void machine_check_print_event_info(struct machine_check_event *evt, break; } + switch(evt->initiator) { + case MCE_INITIATOR_CPU: + initiator = "CPU"; + break; + case MCE_INITIATOR_PCI: + initiator = "PCI"; + break; + case MCE_INITIATOR_ISA: + initiator = "ISA"; + break; + case MCE_INITIATOR_MEMORY: + initiator = "Memory"; + break; + case MCE_INITIATOR_POWERMGM: + initiator = "Power Management"; + break; + case MCE_INITIATOR_UNKNOWN: + default: + initiator = "Unknown"; + break; + } + switch (evt->error_type) { case MCE_ERROR_TYPE_UE: err_type = "UE"; @@ -451,6 +498,14 @@ void machine_check_print_event_info(struct machine_check_event *evt, if (evt->u.link_error.effective_address_provided) ea = evt->u.link_error.effective_address; break; + case MCE_ERROR_TYPE_DCACHE: + err_type = "D-Cache"; + subtype = "Unknown"; + break; + case MCE_ERROR_TYPE_ICACHE: + err_type = "I-Cache"; + subtype = "Unknown"; + break; default: case MCE_ERROR_TYPE_UNKNOWN: err_type = "Unknown"; @@ -483,9 +538,17 @@ void machine_check_print_event_info(struct machine_check_event *evt, level, evt->cpu, evt->srr0, (void *)evt->srr0, pa_str); } + printk("%sMCE: CPU%d: Initiator %s\n", level, evt->cpu, initiator); + subtype = evt->error_class < ARRAY_SIZE(mc_error_class) ? mc_error_class[evt->error_class] : "Unknown"; printk("%sMCE: CPU%d: %s\n", level, evt->cpu, subtype); + +#ifdef CONFIG_PPC_BOOK3S_64 + /* Display faulty slb contents for SLB errors. 
*/ + if (evt->error_type == MCE_ERROR_TYPE_SLB) + slb_dump_contents(local_paca->mce_faulty_slbs); +#endif } EXPORT_SYMBOL_GPL(machine_check_print_event_info); diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c index a814d2dfb5b05c..1cbf7f1a4e3d89 100644 --- a/arch/powerpc/kernel/mce_power.c +++ b/arch/powerpc/kernel/mce_power.c @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -18,6 +19,7 @@ #include #include #include +#include /* * Convert an address related to an mm to a PFN. NOTE: we are in real @@ -26,7 +28,8 @@ unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr) { pte_t *ptep; - unsigned long flags; + unsigned int shift; + unsigned long pfn, flags; struct mm_struct *mm; if (user_mode(regs)) @@ -35,14 +38,23 @@ unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr) mm = &init_mm; local_irq_save(flags); - if (mm == current->mm) - ptep = find_current_mm_pte(mm->pgd, addr, NULL, NULL); - else - ptep = find_init_mm_pte(addr, NULL); + ptep = __find_linux_pte(mm->pgd, addr, NULL, &shift); + + if (!ptep || pte_special(*ptep)) { + pfn = ULONG_MAX; + goto out; + } + + if (shift <= PAGE_SHIFT) + pfn = pte_pfn(*ptep); + else { + unsigned long rpnmask = (1ul << shift) - PAGE_SIZE; + pfn = pte_pfn(__pte(pte_val(*ptep) | (addr & rpnmask))); + } + +out: local_irq_restore(flags); - if (!ptep || pte_special(*ptep)) - return ULONG_MAX; - return pte_pfn(*ptep); + return pfn; } /* flush SLBs and reload */ @@ -344,7 +356,7 @@ static const struct mce_derror_table mce_p9_derror_table[] = { MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true }, { 0, false, 0, 0, 0, 0, 0 } }; -static int mce_find_instr_ea_and_pfn(struct pt_regs *regs, uint64_t *addr, +static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr, uint64_t *phys_addr) { /* @@ -397,6 +409,8 @@ static int mce_handle_ierror(struct pt_regs *regs, /* attempt to correct the error */ switch (table[i].error_type) { case MCE_ERROR_TYPE_SLB: + if (local_paca->in_mce == 1) + slb_save_contents(local_paca->mce_faulty_slbs); handled = mce_flush(MCE_FLUSH_SLB); break; case MCE_ERROR_TYPE_ERAT: @@ -482,6 +496,8 @@ static int mce_handle_derror(struct pt_regs *regs, /* attempt to correct the error */ switch (table[i].error_type) { case MCE_ERROR_TYPE_SLB: + if (local_paca->in_mce == 1) + slb_save_contents(local_paca->mce_faulty_slbs); if (mce_flush(MCE_FLUSH_SLB)) handled = 1; break; @@ -541,7 +557,8 @@ static int mce_handle_derror(struct pt_regs *regs, * kernel/exception-64s.h */ if (get_paca()->in_mce < MAX_MCE_DEPTH) - mce_find_instr_ea_and_pfn(regs, addr, phys_addr); + mce_find_instr_ea_and_phys(regs, addr, + phys_addr); } found = 1; } @@ -558,9 +575,18 @@ static int mce_handle_derror(struct pt_regs *regs, return 0; } -static long mce_handle_ue_error(struct pt_regs *regs) +static long mce_handle_ue_error(struct pt_regs *regs, + struct mce_error_info *mce_err) { long handled = 0; + const struct exception_table_entry *entry; + + entry = search_kernel_exception_table(regs->nip); + if (entry) { + mce_err->ignore_event = true; + regs->nip = extable_fixup(entry); + return 1; + } /* * On specific SCOM read via MMIO we may get a machine check @@ -593,7 +619,7 @@ static long mce_handle_error(struct pt_regs *regs, &phys_addr); if (!handled && mce_err.error_type == MCE_ERROR_TYPE_UE) - handled = mce_handle_ue_error(regs); + handled = mce_handle_ue_error(regs, &mce_err); save_mce_event(regs, handled, &mce_err, regs->nip, addr, phys_addr); diff --git a/arch/powerpc/kernel/misc_32.S 
b/arch/powerpc/kernel/misc_32.S index fe4bd321730e16..82df4b09e79f49 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -292,22 +292,20 @@ _GLOBAL(flush_instruction_cache) iccci 0,r3 #endif #elif defined(CONFIG_FSL_BOOKE) -BEGIN_FTR_SECTION +#ifdef CONFIG_E200 mfspr r3,SPRN_L1CSR0 ori r3,r3,L1CSR0_CFI|L1CSR0_CLFC /* msync; isync recommended here */ mtspr SPRN_L1CSR0,r3 isync blr -END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE) +#endif mfspr r3,SPRN_L1CSR1 ori r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR mtspr SPRN_L1CSR1,r3 +#elif defined(CONFIG_PPC_BOOK3S_601) + blr /* for 601, do nothing */ #else - mfspr r3,SPRN_PVR - rlwinm r3,r3,16,16,31 - cmpwi 0,r3,1 - beqlr /* for 601, do nothing */ /* 603/604 processor - use invalidate-all bit in HID0 */ mfspr r3,SPRN_HID0 ori r3,r3,HID0_ICFI @@ -326,10 +324,10 @@ EXPORT_SYMBOL(flush_instruction_cache) * flush_icache_range(unsigned long start, unsigned long stop) */ _GLOBAL(flush_icache_range) -BEGIN_FTR_SECTION +#if defined(CONFIG_PPC_BOOK3S_601) || defined(CONFIG_E200) PURGE_PREFETCHED_INS - blr /* for 601, do nothing */ -END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) + blr /* for 601 and e200, do nothing */ +#else rlwinm r3,r3,0,0,31 - L1_CACHE_SHIFT subf r4,r3,r4 addi r4,r4,L1_CACHE_BYTES - 1 @@ -355,6 +353,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) sync /* additional sync needed on g4 */ isync blr +#endif _ASM_NOKPROBE_SYMBOL(flush_icache_range) EXPORT_SYMBOL(flush_icache_range) @@ -362,15 +361,15 @@ EXPORT_SYMBOL(flush_icache_range) * Flush a particular page from the data cache to RAM. * Note: this is necessary because the instruction cache does *not* * snoop from the data cache. - * This is a no-op on the 601 which has a unified cache. + * This is a no-op on the 601 and e200 which have a unified cache. * * void __flush_dcache_icache(void *page) */ _GLOBAL(__flush_dcache_icache) -BEGIN_FTR_SECTION +#if defined(CONFIG_PPC_BOOK3S_601) || defined(CONFIG_E200) PURGE_PREFETCHED_INS blr -END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) +#else rlwinm r3,r3,0,0,31-PAGE_SHIFT /* Get page base address */ li r4,PAGE_SIZE/L1_CACHE_BYTES /* Number of lines in a page */ mtctr r4 @@ -398,6 +397,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x) sync isync blr +#endif #ifndef CONFIG_BOOKE /* @@ -409,10 +409,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x) * void __flush_dcache_icache_phys(unsigned long physaddr) */ _GLOBAL(__flush_dcache_icache_phys) -BEGIN_FTR_SECTION +#if defined(CONFIG_PPC_BOOK3S_601) || defined(CONFIG_E200) PURGE_PREFETCHED_INS - blr /* for 601, do nothing */ -END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) + blr /* for 601 and e200, do nothing */ +#else mfmsr r10 rlwinm r0,r10,0,28,26 /* clear DR */ mtmsr r0 @@ -433,6 +433,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) mtmsr r10 /* restore DR */ isync blr +#endif #endif /* CONFIG_BOOKE */ /* @@ -452,7 +453,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) stwu r9,16(r3) _GLOBAL(copy_page) + rlwinm r5, r3, 0, L1_CACHE_BYTES - 1 addi r3,r3,-4 + +0: twnei r5, 0 /* WARN if r3 is not cache aligned */ + EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING + addi r4,r4,-4 li r5,4 diff --git a/arch/powerpc/kernel/note.S b/arch/powerpc/kernel/note.S new file mode 100644 index 00000000000000..bcdad15395ddd7 --- /dev/null +++ b/arch/powerpc/kernel/note.S @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * PowerPC ELF notes. + * + * Copyright 2019, IBM Corporation + */ + +#include +#include + +/* + * Ultravisor-capable bit (PowerNV only). 
+ * + * Bit 0 indicates that the powerpc kernel binary knows how to run in an + * ultravisor-enabled system. + * + * In an ultravisor-enabled system, some machine resources are now controlled + * by the ultravisor. If the kernel is not ultravisor-capable, but it ends up + * being run on a machine with ultravisor, the kernel will probably crash + * trying to access ultravisor resources. For instance, it may crash in early + * boot trying to set the partition table entry 0. + * + * In an ultravisor-enabled system, a bootloader could warn the user or prevent + * the kernel from being run if the PowerPC ultravisor capability doesn't exist + * or the Ultravisor-capable bit is not set. + */ +#ifdef CONFIG_PPC_POWERNV +#define PPCCAP_ULTRAVISOR_BIT (1 << 0) +#else +#define PPCCAP_ULTRAVISOR_BIT 0 +#endif + +/* + * Add the PowerPC Capabilities in the binary ELF note. It is a bitmap that + * can be used to advertise kernel capabilities to userland. + */ +#define PPC_CAPABILITIES_BITMAP (PPCCAP_ULTRAVISOR_BIT) + +ELFNOTE(PowerPC, PPC_ELFNOTE_CAPABILITIES, + .long PPC_CAPABILITIES_BITMAP) diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index e3ad8aa4730d17..949eceb254d859 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -14,6 +14,8 @@ #include #include #include +#include +#include #include "setup.h" @@ -52,6 +54,43 @@ static void *__init alloc_paca_data(unsigned long size, unsigned long align, #ifdef CONFIG_PPC_PSERIES +#define LPPACA_SIZE 0x400 + +static void *__init alloc_shared_lppaca(unsigned long size, unsigned long align, + unsigned long limit, int cpu) +{ + size_t shared_lppaca_total_size = PAGE_ALIGN(nr_cpu_ids * LPPACA_SIZE); + static unsigned long shared_lppaca_size; + static void *shared_lppaca; + void *ptr; + + if (!shared_lppaca) { + memblock_set_bottom_up(true); + + shared_lppaca = + memblock_alloc_try_nid(shared_lppaca_total_size, + PAGE_SIZE, MEMBLOCK_LOW_LIMIT, + limit, NUMA_NO_NODE); + if (!shared_lppaca) + panic("cannot allocate shared data"); + + memblock_set_bottom_up(false); + uv_share_page(PHYS_PFN(__pa(shared_lppaca)), + shared_lppaca_total_size >> PAGE_SHIFT); + } + + ptr = shared_lppaca + shared_lppaca_size; + shared_lppaca_size += size; + + /* + * This is very early in boot, so no harm done if the kernel crashes at + * this point. + */ + BUG_ON(shared_lppaca_size >= shared_lppaca_total_size); + + return ptr; +} + /* * See asm/lppaca.h for more detail. 
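
alloc_shared_lppaca() above is a one-shot carve-out: the whole pool is reserved page-aligned, shared with the ultravisor once via uv_share_page(), and then handed out in fixed LPPACA_SIZE slots from a static bump pointer. A stripped-down sketch of that pattern in isolation; the names are made up, and the real code additionally uses memblock_alloc_try_nid() with an upper limit and bottom-up allocation:

#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/memblock.h>

#define CHUNK_SIZE	0x400			/* one LPPACA-sized slot */

static void *pool;
static unsigned long pool_size;			/* total bytes reserved */
static unsigned long pool_used;			/* bytes handed out so far */

static void *__init carve_chunk(unsigned long nr_slots)
{
	void *ptr;

	if (!pool) {
		pool_size = PAGE_ALIGN(nr_slots * CHUNK_SIZE);
		pool = memblock_alloc(pool_size, PAGE_SIZE);
		if (!pool)
			panic("cannot allocate shared pool");
		/*
		 * The real code shares the pool with the ultravisor here,
		 * i.e. uv_share_page(PHYS_PFN(__pa(pool)),
		 *		      pool_size >> PAGE_SHIFT);
		 */
	}

	BUG_ON(pool_used + CHUNK_SIZE > pool_size);
	ptr = pool + pool_used;
	pool_used += CHUNK_SIZE;
	return ptr;
}
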
* @@ -65,7 +104,7 @@ static inline void init_lppaca(struct lppaca *lppaca) *lppaca = (struct lppaca) { .desc = cpu_to_be32(0xd397d781), /* "LpPa" */ - .size = cpu_to_be16(0x400), + .size = cpu_to_be16(LPPACA_SIZE), .fpregs_in_use = 1, .slb_count = cpu_to_be16(64), .vmxregs_in_use = 0, @@ -75,19 +114,22 @@ static inline void init_lppaca(struct lppaca *lppaca) static struct lppaca * __init new_lppaca(int cpu, unsigned long limit) { struct lppaca *lp; - size_t size = 0x400; - BUILD_BUG_ON(size < sizeof(struct lppaca)); + BUILD_BUG_ON(sizeof(struct lppaca) > LPPACA_SIZE); if (early_cpu_has_feature(CPU_FTR_HVMODE)) return NULL; - lp = alloc_paca_data(size, 0x400, limit, cpu); + if (is_secure_guest()) + lp = alloc_shared_lppaca(LPPACA_SIZE, 0x400, limit, cpu); + else + lp = alloc_paca_data(LPPACA_SIZE, 0x400, limit, cpu); + init_lppaca(lp); return lp; } -#endif /* CONFIG_PPC_BOOK3S */ +#endif /* CONFIG_PPC_PSERIES */ #ifdef CONFIG_PPC_BOOK3S_64 diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index f627e15bb43ce4..1c448cf255061c 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -1379,10 +1379,6 @@ void __init pcibios_resource_survey(void) pr_debug("PCI: Assigning unassigned resources...\n"); pci_assign_unassigned_resources(); } - - /* Call machine dependent fixup */ - if (ppc_md.pcibios_fixup) - ppc_md.pcibios_fixup(); } /* This is used by the PCI hotplug driver to allocate resource diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c index 0b0cf8168b4753..fc62c4bc47b14e 100644 --- a/arch/powerpc/kernel/pci-hotplug.c +++ b/arch/powerpc/kernel/pci-hotplug.c @@ -55,11 +55,18 @@ EXPORT_SYMBOL_GPL(pci_find_bus_by_node); void pcibios_release_device(struct pci_dev *dev) { struct pci_controller *phb = pci_bus_to_host(dev->bus); + struct pci_dn *pdn = pci_get_pdn(dev); eeh_remove_device(dev); if (phb->controller_ops.release_device) phb->controller_ops.release_device(dev); + + /* free()ing the pci_dn has been deferred to us, do it now */ + if (pdn && (pdn->flags & PCI_DN_FLAG_DEAD)) { + pci_dbg(dev, "freeing dead pdn\n"); + kfree(pdn); + } } /** diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index 50942a1d1a5fba..b49e1060a3bff7 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -263,6 +263,10 @@ static int __init pcibios_init(void) /* Call common code to handle resource allocation */ pcibios_resource_survey(); + /* Call machine dependent fixup */ + if (ppc_md.pcibios_fixup) + ppc_md.pcibios_fixup(); + /* Call machine dependent post-init code */ if (ppc_md.pcibios_after_init) ppc_md.pcibios_after_init(); diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index b7030b1189d017..f83d1f69b1dd83 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -54,14 +54,20 @@ static int __init pcibios_init(void) pci_add_flags(PCI_ENABLE_PROC_DOMAINS | PCI_COMPAT_DOMAIN_0); /* Scan all of the recorded PCI controllers. */ - list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) pcibios_scan_phb(hose); - pci_bus_add_devices(hose->bus); - } /* Call common code to handle resource allocation */ pcibios_resource_survey(); + /* Add devices. 
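
The pci-hotplug.c hunk above is one half of a deferred-free scheme: pci_remove_device_node_info() (in the pci_dn.c hunk further below) only marks a pci_dn as PCI_DN_FLAG_DEAD when a pci_dev still refers to it, and pcibios_release_device() performs the actual kfree() once that last user goes away. The general shape of the mark-dead/free-on-release idea, with made-up structure and flag names:

#include <linux/types.h>
#include <linux/slab.h>

struct my_node {
	unsigned int flags;
#define MY_NODE_DEAD	(1U << 0)
	/* ... backing data ... */
};

/* teardown path: cannot free while a consumer still holds a pointer */
static void my_remove_node(struct my_node *n, bool still_referenced)
{
	if (still_referenced)
		n->flags |= MY_NODE_DEAD;	/* freed later, on release */
	else
		kfree(n);
}

/* consumer's release callback: the last user drops the deferred object */
static void my_release(struct my_node *n)
{
	if (n && (n->flags & MY_NODE_DEAD))
		kfree(n);
}
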
*/ + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) + pci_bus_add_devices(hose->bus); + + /* Call machine dependent fixup */ + if (ppc_md.pcibios_fixup) + ppc_md.pcibios_fixup(); + printk(KERN_DEBUG "PCI: Probing PCI hardware done\n"); return 0; diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c index c4c8c237a10681..9524009ca1ae43 100644 --- a/arch/powerpc/kernel/pci_dn.c +++ b/arch/powerpc/kernel/pci_dn.c @@ -323,6 +323,7 @@ void pci_remove_device_node_info(struct device_node *dn) { struct pci_dn *pdn = dn ? PCI_DN(dn) : NULL; struct device_node *parent; + struct pci_dev *pdev; #ifdef CONFIG_EEH struct eeh_dev *edev = pdn_to_eeh_dev(pdn); @@ -336,12 +337,28 @@ void pci_remove_device_node_info(struct device_node *dn) WARN_ON(!list_empty(&pdn->child_list)); list_del(&pdn->list); + /* Drop the parent pci_dn's ref to our backing dt node */ parent = of_get_parent(dn); if (parent) of_node_put(parent); - dn->data = NULL; - kfree(pdn); + /* + * At this point we *might* still have a pci_dev that was + * instantiated from this pci_dn. So defer free()ing it until + * the pci_dev's release function is called. + */ + pdev = pci_get_domain_bus_and_slot(pdn->phb->global_number, + pdn->busno, pdn->devfn); + if (pdev) { + /* NB: pdev has a ref to dn */ + pci_dbg(pdev, "marked pdn (from %pOF) as dead\n", dn); + pdn->flags |= PCI_DN_FLAG_DEAD; + } else { + dn->data = NULL; + kfree(pdn); + } + + pci_dev_put(pdev); } EXPORT_SYMBOL_GPL(pci_remove_device_node_info); diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index 409c6c1beabf60..f91d7e94872ebd 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c @@ -34,31 +34,75 @@ static u32 get_int_prop(struct device_node *np, const char *name, u32 def) * pci_parse_of_flags - Parse the flags cell of a device tree PCI address * @addr0: value of 1st cell of a device tree PCI address. * @bridge: Set this flag if the address is from a bridge 'ranges' property + * + * PCI Bus Binding to IEEE Std 1275-1994 + * + * Bit# 33222222 22221111 11111100 00000000 + * 10987654 32109876 54321098 76543210 + * phys.hi cell: npt000ss bbbbbbbb dddddfff rrrrrrrr + * phys.mid cell: hhhhhhhh hhhhhhhh hhhhhhhh hhhhhhhh + * phys.lo cell: llllllll llllllll llllllll llllllll + * + * where: + * n is 0 if the address is relocatable, 1 otherwise + * p is 1 if the addressable region is "prefetchable", 0 otherwise + * t is 1 if the address is aliased (for non-relocatable I/O), + * below 1 MB (for Memory),or below 64 KB (for relocatable I/O). 
+ * ss is the space code, denoting the address space: + * 00 denotes Configuration Space + * 01 denotes I/O Space + * 10 denotes 32-bit-address Memory Space + * 11 denotes 64-bit-address Memory Space + * bbbbbbbb is the 8-bit Bus Number + * ddddd is the 5-bit Device Number + * fff is the 3-bit Function Number + * rrrrrrrr is the 8-bit Register Number */ +#define OF_PCI_ADDR0_SPACE(ss) (((ss)&3)<<24) +#define OF_PCI_ADDR0_SPACE_CFG OF_PCI_ADDR0_SPACE(0) +#define OF_PCI_ADDR0_SPACE_IO OF_PCI_ADDR0_SPACE(1) +#define OF_PCI_ADDR0_SPACE_MMIO32 OF_PCI_ADDR0_SPACE(2) +#define OF_PCI_ADDR0_SPACE_MMIO64 OF_PCI_ADDR0_SPACE(3) +#define OF_PCI_ADDR0_SPACE_MASK OF_PCI_ADDR0_SPACE(3) +#define OF_PCI_ADDR0_RELOC (1UL<<31) +#define OF_PCI_ADDR0_PREFETCH (1UL<<30) +#define OF_PCI_ADDR0_ALIAS (1UL<<29) +#define OF_PCI_ADDR0_BUS 0x00FF0000UL +#define OF_PCI_ADDR0_DEV 0x0000F800UL +#define OF_PCI_ADDR0_FN 0x00000700UL +#define OF_PCI_ADDR0_BARREG 0x000000FFUL + unsigned int pci_parse_of_flags(u32 addr0, int bridge) { - unsigned int flags = 0; + unsigned int flags = 0, as = addr0 & OF_PCI_ADDR0_SPACE_MASK; - if (addr0 & 0x02000000) { + if (as == OF_PCI_ADDR0_SPACE_MMIO32 || as == OF_PCI_ADDR0_SPACE_MMIO64) { flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY; - flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64; - if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) - flags |= IORESOURCE_MEM_64; - flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M; - if (addr0 & 0x40000000) - flags |= IORESOURCE_PREFETCH - | PCI_BASE_ADDRESS_MEM_PREFETCH; + + if (as == OF_PCI_ADDR0_SPACE_MMIO64) + flags |= PCI_BASE_ADDRESS_MEM_TYPE_64 | IORESOURCE_MEM_64; + + if (addr0 & OF_PCI_ADDR0_ALIAS) + flags |= PCI_BASE_ADDRESS_MEM_TYPE_1M; + + if (addr0 & OF_PCI_ADDR0_PREFETCH) + flags |= IORESOURCE_PREFETCH | + PCI_BASE_ADDRESS_MEM_PREFETCH; + /* Note: We don't know whether the ROM has been left enabled * by the firmware or not. 
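
With the named OF_PCI_ADDR0_* masks above, decoding a phys.hi cell becomes mechanical. As a worked example, the hypothetical assigned-addresses cell 0x82000810 decodes to: non-relocatable, non-prefetchable, 32-bit memory space, bus 0, device 1, function 0, register 0x10 (BAR0). A standalone user-space check, with the mask values copied from this hunk and the sample value chosen purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define OF_PCI_ADDR0_SPACE(ss)	(((ss) & 3) << 24)
#define OF_PCI_ADDR0_SPACE_MASK	OF_PCI_ADDR0_SPACE(3)
#define OF_PCI_ADDR0_RELOC	(1U << 31)
#define OF_PCI_ADDR0_PREFETCH	(1U << 30)
#define OF_PCI_ADDR0_BUS	0x00FF0000U
#define OF_PCI_ADDR0_DEV	0x0000F800U
#define OF_PCI_ADDR0_FN		0x00000700U
#define OF_PCI_ADDR0_BARREG	0x000000FFU

int main(void)
{
	uint32_t addr0 = 0x82000810;	/* example phys.hi value */

	printf("space code  %u\n", (unsigned)((addr0 & OF_PCI_ADDR0_SPACE_MASK) >> 24));
	printf("relocatable %s\n", (addr0 & OF_PCI_ADDR0_RELOC) ? "no" : "yes");
	printf("prefetch    %s\n", (addr0 & OF_PCI_ADDR0_PREFETCH) ? "yes" : "no");
	printf("bus         %u\n", (unsigned)((addr0 & OF_PCI_ADDR0_BUS) >> 16));
	printf("device      %u\n", (unsigned)((addr0 & OF_PCI_ADDR0_DEV) >> 11));
	printf("function    %u\n", (unsigned)((addr0 & OF_PCI_ADDR0_FN) >> 8));
	printf("register    0x%02x\n", (unsigned)(addr0 & OF_PCI_ADDR0_BARREG));
	return 0;
}
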
We mark it as disabled (ie, we do * not set the IORESOURCE_ROM_ENABLE flag) for now rather than * do a config space read, it will be force-enabled if needed */ - if (!bridge && (addr0 & 0xff) == 0x30) + if (!bridge && (addr0 & OF_PCI_ADDR0_BARREG) == PCI_ROM_ADDRESS) flags |= IORESOURCE_READONLY; - } else if (addr0 & 0x01000000) + + } else if (as == OF_PCI_ADDR0_SPACE_IO) flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO; + if (flags) flags |= IORESOURCE_SIZEALIGN; + return flags; } diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 7a84c9f1778e6a..639ceae7da9d81 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1587,8 +1587,9 @@ static void setup_ksp_vsid(struct task_struct *p, unsigned long sp) /* * Copy architecture-specific thread state */ -int copy_thread(unsigned long clone_flags, unsigned long usp, - unsigned long kthread_arg, struct task_struct *p) +int copy_thread_tls(unsigned long clone_flags, unsigned long usp, + unsigned long kthread_arg, struct task_struct *p, + unsigned long tls) { struct pt_regs *childregs, *kregs; extern void ret_from_fork(void); @@ -1629,10 +1630,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, if (clone_flags & CLONE_SETTLS) { #ifdef CONFIG_PPC64 if (!is_32bit_task()) - childregs->gpr[13] = childregs->gpr[6]; + childregs->gpr[13] = tls; else #endif - childregs->gpr[2] = childregs->gpr[6]; + childregs->gpr[2] = tls; } f = ret_from_fork; @@ -2033,10 +2034,8 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) int count = 0; int firstframe = 1; #ifdef CONFIG_FUNCTION_GRAPH_TRACER - struct ftrace_ret_stack *ret_stack; - extern void return_to_handler(void); - unsigned long rth = (unsigned long)return_to_handler; - int curr_frame = 0; + unsigned long ret_addr; + int ftrace_idx = 0; #endif if (tsk == NULL) @@ -2065,15 +2064,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) if (!firstframe || ip != lr) { printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); #ifdef CONFIG_FUNCTION_GRAPH_TRACER - if ((ip == rth) && curr_frame >= 0) { - ret_stack = ftrace_graph_get_ret_stack(current, - curr_frame++); - if (ret_stack) - pr_cont(" (%pS)", - (void *)ret_stack->ret); - else - curr_frame = -1; - } + ret_addr = ftrace_graph_ret_addr(current, + &ftrace_idx, ip, stack); + if (ret_addr != ip) + pr_cont(" (%pS)", (void *)ret_addr); #endif if (firstframe) pr_cont(" (unreliable)"); diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 7159e791a70d17..6620f37abe7323 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -55,6 +55,7 @@ #include #include #include +#include #include @@ -702,9 +703,12 @@ void __init early_init_devtree(void *params) #ifdef CONFIG_PPC_POWERNV /* Some machines might need OPAL info for debugging, grab it now. 
*/ of_scan_flat_dt(early_init_dt_scan_opal, NULL); + + /* Scan tree for ultravisor feature */ + of_scan_flat_dt(early_init_dt_scan_ultravisor, NULL); #endif -#ifdef CONFIG_FA_DUMP +#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP) /* scan tree to see if dump is active during last boot */ of_scan_flat_dt(early_init_dt_scan_fw_dump, NULL); #endif @@ -731,7 +735,7 @@ void __init early_init_devtree(void *params) if (PHYSICAL_START > MEMORY_START) memblock_reserve(MEMORY_START, 0x8000); reserve_kdump_trampoline(); -#ifdef CONFIG_FA_DUMP +#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP) /* * If we fail to reserve memory for firmware-assisted dump then * fallback to kexec based kdump. diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 514707ef677921..a4e7762dd28648 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -40,6 +40,7 @@ #include #include #include +#include #include @@ -94,7 +95,7 @@ static int of_workarounds __prombss; #define PROM_BUG() do { \ prom_printf("kernel BUG at %s line 0x%x!\n", \ __FILE__, __LINE__); \ - __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \ + __builtin_trap(); \ } while (0) #ifdef DEBUG_PROM @@ -171,6 +172,10 @@ static bool __prombss prom_radix_disable; static bool __prombss prom_xive_disable; #endif +#ifdef CONFIG_PPC_SVM +static bool __prombss prom_svm_enable; +#endif + struct platform_support { bool hash_mmu; bool radix_mmu; @@ -812,6 +817,17 @@ static void __init early_cmdline_parse(void) prom_debug("XIVE disabled from cmdline\n"); } #endif /* CONFIG_PPC_PSERIES */ + +#ifdef CONFIG_PPC_SVM + opt = prom_strstr(prom_cmd_line, "svm="); + if (opt) { + bool val; + + opt += sizeof("svm=") - 1; + if (!prom_strtobool(opt, &val)) + prom_svm_enable = val; + } +#endif /* CONFIG_PPC_SVM */ } #ifdef CONFIG_PPC_PSERIES @@ -1712,6 +1728,43 @@ static void __init prom_close_stdin(void) } } +#ifdef CONFIG_PPC_SVM +static int prom_rtas_hcall(uint64_t args) +{ + register uint64_t arg1 asm("r3") = H_RTAS; + register uint64_t arg2 asm("r4") = args; + + asm volatile("sc 1\n" : "=r" (arg1) : + "r" (arg1), + "r" (arg2) :); + return arg1; +} + +static struct rtas_args __prombss os_term_args; + +static void __init prom_rtas_os_term(char *str) +{ + phandle rtas_node; + __be32 val; + u32 token; + + prom_debug("%s: start...\n", __func__); + rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas")); + prom_debug("rtas_node: %x\n", rtas_node); + if (!PHANDLE_VALID(rtas_node)) + return; + + val = 0; + prom_getprop(rtas_node, "ibm,os-term", &val, sizeof(val)); + token = be32_to_cpu(val); + prom_debug("ibm,os-term: %x\n", token); + if (token == 0) + prom_panic("Could not get token for ibm,os-term\n"); + os_term_args.token = cpu_to_be32(token); + prom_rtas_hcall((uint64_t)&os_term_args); +} +#endif /* CONFIG_PPC_SVM */ + /* * Allocate room for and instantiate RTAS */ @@ -3168,6 +3221,46 @@ static void unreloc_toc(void) #endif #endif +#ifdef CONFIG_PPC_SVM +/* + * Perform the Enter Secure Mode ultracall. + */ +static int enter_secure_mode(unsigned long kbase, unsigned long fdt) +{ + register unsigned long r3 asm("r3") = UV_ESM; + register unsigned long r4 asm("r4") = kbase; + register unsigned long r5 asm("r5") = fdt; + + asm volatile("sc 2" : "+r"(r3) : "r"(r4), "r"(r5)); + + return r3; +} + +/* + * Call the Ultravisor to transfer us to secure memory if we have an ESM blob. 
+ */ +static void setup_secure_guest(unsigned long kbase, unsigned long fdt) +{ + int ret; + + if (!prom_svm_enable) + return; + + /* Switch to secure mode. */ + prom_printf("Switching to secure mode.\n"); + + ret = enter_secure_mode(kbase, fdt); + if (ret != U_SUCCESS) { + prom_printf("Returned %d from switching to secure mode.\n", ret); + prom_rtas_os_term("Switch to secure mode failed.\n"); + } +} +#else +static void setup_secure_guest(unsigned long kbase, unsigned long fdt) +{ +} +#endif /* CONFIG_PPC_SVM */ + /* * We enter here early on, when the Open Firmware prom is still * handling exceptions and the MMU hash table for us. @@ -3366,6 +3459,9 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, unreloc_toc(); #endif + /* Move to secure memory if we're supposed to be secure guests. */ + setup_secure_guest(kbase, hdr); + __start(hdr, kbase, 0, 0, 0, 0, 0); return 0; diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh index 160bef0d553d97..78bab17b1396ae 100644 --- a/arch/powerpc/kernel/prom_init_check.sh +++ b/arch/powerpc/kernel/prom_init_check.sh @@ -33,7 +33,7 @@ OBJ="$2" ERROR=0 -function check_section() +check_section() { file=$1 section=$2 diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 5faf0a64c92b85..c5fa251b8950c7 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -871,15 +872,17 @@ static int rtas_cpu_state_change_mask(enum rtas_cpu_state state, return 0; for_each_cpu(cpu, cpus) { + struct device *dev = get_cpu_device(cpu); + switch (state) { case DOWN: - cpuret = cpu_down(cpu); + cpuret = device_offline(dev); break; case UP: - cpuret = cpu_up(cpu); + cpuret = device_online(dev); break; } - if (cpuret) { + if (cpuret < 0) { pr_debug("%s: cpu_%s for cpu#%d returned %d.\n", __func__, ((state == UP) ? 
"up" : "down"), @@ -896,6 +899,7 @@ static int rtas_cpu_state_change_mask(enum rtas_cpu_state state, cpumask_clear_cpu(cpu, cpus); } } + cond_resched(); } return ret; @@ -922,13 +926,11 @@ int rtas_online_cpus_mask(cpumask_var_t cpus) return ret; } -EXPORT_SYMBOL(rtas_online_cpus_mask); int rtas_offline_cpus_mask(cpumask_var_t cpus) { return rtas_cpu_state_change_mask(DOWN, cpus); } -EXPORT_SYMBOL(rtas_offline_cpus_mask); int rtas_ibm_suspend_me(u64 handle) { @@ -968,6 +970,8 @@ int rtas_ibm_suspend_me(u64 handle) data.token = rtas_token("ibm,suspend-me"); data.complete = &done; + lock_device_hotplug(); + /* All present CPUs must be online */ cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask); cpuret = rtas_online_cpus_mask(offline_mask); @@ -1006,6 +1010,7 @@ int rtas_ibm_suspend_me(u64 handle) __func__); out: + unlock_device_hotplug(); free_cpumask_var(offline_mask); return atomic_read(&data.error); } diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index e1c9cf079503e3..7cfcb294b11ca7 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -28,7 +28,7 @@ static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NO bool barrier_nospec_enabled; static bool no_nospec; static bool btb_flush_enabled; -#ifdef CONFIG_PPC_FSL_BOOK3E +#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64) static bool no_spectrev2; #endif @@ -114,7 +114,7 @@ static __init int security_feature_debugfs_init(void) device_initcall(security_feature_debugfs_init); #endif /* CONFIG_DEBUG_FS */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64) static int __init handle_nospectre_v2(char *p) { no_spectrev2 = true; @@ -122,6 +122,9 @@ static int __init handle_nospectre_v2(char *p) return 0; } early_param("nospectre_v2", handle_nospectre_v2); +#endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */ + +#ifdef CONFIG_PPC_FSL_BOOK3E void setup_spectre_v2(void) { if (no_spectrev2 || cpu_mitigations_off()) @@ -399,7 +402,17 @@ static void toggle_count_cache_flush(bool enable) void setup_count_cache_flush(void) { - toggle_count_cache_flush(true); + bool enable = true; + + if (no_spectrev2 || cpu_mitigations_off()) { + if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) || + security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED)) + pr_warn("Spectre v2 mitigations not under software control, can't disable\n"); + + enable = false; + } + + toggle_count_cache_flush(enable); } #ifdef CONFIG_DEBUG_FS diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 5e6543aba1b328..25aaa390300091 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -800,9 +800,15 @@ static __init void print_system_info(void) pr_info("mmu_features = 0x%08x\n", cur_cpu_spec->mmu_features); #ifdef CONFIG_PPC64 pr_info("firmware_features = 0x%016lx\n", powerpc_firmware_features); +#ifdef CONFIG_PPC_BOOK3S + pr_info("vmalloc start = 0x%lx\n", KERN_VIRT_START); + pr_info("IO start = 0x%lx\n", KERN_IO_START); + pr_info("vmemmap start = 0x%lx\n", (unsigned long)vmemmap); +#endif #endif - print_system_hash_info(); + if (!early_radix_enabled()) + print_system_hash_info(); if (PHYSICAL_START > 0) pr_info("physical_start = 0x%llx\n", diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 94517e4a272359..a7541edf0cdb6f 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -206,6 +206,6 @@ __init void 
initialize_cache_info(void) dcache_bsize = cur_cpu_spec->dcache_bsize; icache_bsize = cur_cpu_spec->icache_bsize; ucache_bsize = 0; - if (cpu_has_feature(CPU_FTR_UNIFIED_ID_CACHE)) + if (IS_ENABLED(CONFIG_PPC_BOOK3S_601) || IS_ENABLED(CONFIG_E200)) ucache_bsize = icache_bsize = dcache_bsize; } diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c index 1e2276963f6d32..e2a46cfed5fd1b 100644 --- a/arch/powerpc/kernel/stacktrace.c +++ b/arch/powerpc/kernel/stacktrace.c @@ -182,7 +182,7 @@ static int __save_stack_trace_tsk_reliable(struct task_struct *tsk, * FIXME: IMHO these tests do not belong in * arch-dependent code, they are generic. */ - ip = ftrace_graph_ret_addr(tsk, &graph_idx, ip, NULL); + ip = ftrace_graph_ret_addr(tsk, &graph_idx, ip, stack); #ifdef CONFIG_KPROBES /* * Mark stacktraces with kretprobed functions on them diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index e2147d7c9e72fb..80a676da11cbcb 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "cacheinfo.h" #include "setup.h" @@ -715,6 +716,23 @@ static struct device_attribute pa6t_attrs[] = { #endif /* HAS_PPC_PMC_PA6T */ #endif /* HAS_PPC_PMC_CLASSIC */ +#ifdef CONFIG_PPC_SVM +static ssize_t show_svm(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", is_secure_guest()); +} +static DEVICE_ATTR(svm, 0444, show_svm, NULL); + +static void create_svm_file(void) +{ + device_create_file(cpu_subsys.dev_root, &dev_attr_svm); +} +#else +static void create_svm_file(void) +{ +} +#endif /* CONFIG_PPC_SVM */ + static int register_cpu_online(unsigned int cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); @@ -1058,6 +1076,8 @@ static int __init topology_init(void) sysfs_create_dscr_default(); #endif /* CONFIG_PPC64 */ + create_svm_file(); + return 0; } subsys_initcall(topology_init); diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index be1ca98fce5c02..7ea0ca044b6500 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c @@ -944,7 +944,8 @@ int ftrace_disable_ftrace_graph_caller(void) * Hook the return address and push it in the stack of return addrs * in current thread info. Return the address we want to divert to. 
*/ -unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) +unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip, + unsigned long sp) { unsigned long return_hooker; @@ -956,7 +957,7 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) return_hooker = ppc_function_entry(return_to_handler); - if (!function_graph_enter(parent, ip, 0, NULL)) + if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp)) parent = return_hooker; out: return parent; diff --git a/arch/powerpc/kernel/trace/ftrace_32.S b/arch/powerpc/kernel/trace/ftrace_32.S index 183f608efb81a4..e023ae59c4294c 100644 --- a/arch/powerpc/kernel/trace/ftrace_32.S +++ b/arch/powerpc/kernel/trace/ftrace_32.S @@ -50,6 +50,7 @@ _GLOBAL(ftrace_stub) #ifdef CONFIG_FUNCTION_GRAPH_TRACER _GLOBAL(ftrace_graph_caller) + addi r5, r1, 48 /* load r4 with local address */ lwz r4, 44(r1) subi r4, r4, MCOUNT_INSN_SIZE diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S index 74acbf16a66624..f9fd5f743eba34 100644 --- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S +++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S @@ -294,6 +294,7 @@ _GLOBAL(ftrace_graph_caller) std r2, 24(r1) ld r2, PACATOC(r13) /* get kernel TOC in r2 */ + addi r5, r1, 112 mfctr r4 /* ftrace_caller has moved local addr here */ std r4, 40(r1) mflr r3 /* ftrace_caller has restored LR from stack */ diff --git a/arch/powerpc/kernel/trace/ftrace_64_pg.S b/arch/powerpc/kernel/trace/ftrace_64_pg.S index e41a7d13c99c0d..6708e24db0aba8 100644 --- a/arch/powerpc/kernel/trace/ftrace_64_pg.S +++ b/arch/powerpc/kernel/trace/ftrace_64_pg.S @@ -41,6 +41,7 @@ _GLOBAL(ftrace_stub) #ifdef CONFIG_FUNCTION_GRAPH_TRACER _GLOBAL(ftrace_graph_caller) + addi r5, r1, 112 /* load r4 with local address */ ld r4, 128(r1) subi r4, r4, MCOUNT_INSN_SIZE diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 11caa0291254e9..82f43535e68674 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -472,6 +472,7 @@ void system_reset_exception(struct pt_regs *regs) if (debugger(regs)) goto out; + kmsg_dump(KMSG_DUMP_OOPS); /* * A system reset is a request to dump, so we always send * it through the crashdump code (if fadump or kdump are diff --git a/arch/powerpc/kernel/ucall.S b/arch/powerpc/kernel/ucall.S new file mode 100644 index 00000000000000..07296bc3916643 --- /dev/null +++ b/arch/powerpc/kernel/ucall.S @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Generic code to perform an ultravisor call. + * + * Copyright 2019, IBM Corporation. 
+ * + */ +#include +#include + +_GLOBAL(ucall_norets) +EXPORT_SYMBOL_GPL(ucall_norets) + sc 2 /* Invoke the ultravisor */ + blr /* Return r3 = status */ diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index d60598113a9f3d..eae9ddaecbcf46 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -94,28 +94,6 @@ static struct vdso_patch_def vdso_patches[] = { CPU_FTR_COHERENT_ICACHE, CPU_FTR_COHERENT_ICACHE, "__kernel_sync_dicache", "__kernel_sync_dicache_p5" }, -#ifdef CONFIG_PPC32 - { - CPU_FTR_USE_RTC, CPU_FTR_USE_RTC, - "__kernel_gettimeofday", NULL - }, - { - CPU_FTR_USE_RTC, CPU_FTR_USE_RTC, - "__kernel_clock_gettime", NULL - }, - { - CPU_FTR_USE_RTC, CPU_FTR_USE_RTC, - "__kernel_clock_getres", NULL - }, - { - CPU_FTR_USE_RTC, CPU_FTR_USE_RTC, - "__kernel_get_tbfreq", NULL - }, - { - CPU_FTR_USE_RTC, CPU_FTR_USE_RTC, - "__kernel_time", NULL - }, -#endif }; /* diff --git a/arch/powerpc/kernel/vdso32/datapage.S b/arch/powerpc/kernel/vdso32/datapage.S index 6984125b9fc092..6c7401bd284e7d 100644 --- a/arch/powerpc/kernel/vdso32/datapage.S +++ b/arch/powerpc/kernel/vdso32/datapage.S @@ -70,6 +70,7 @@ V_FUNCTION_END(__kernel_get_syscall_map) * * returns the timebase frequency in HZ */ +#ifndef CONFIG_PPC_BOOK3S_601 V_FUNCTION_BEGIN(__kernel_get_tbfreq) .cfi_startproc mflr r12 @@ -82,3 +83,4 @@ V_FUNCTION_BEGIN(__kernel_get_tbfreq) blr .cfi_endproc V_FUNCTION_END(__kernel_get_tbfreq) +#endif diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S index 099a6db14e671f..00c025ba4a92b4 100644 --- a/arch/powerpc/kernel/vdso32/vdso32.lds.S +++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S @@ -144,10 +144,13 @@ VERSION __kernel_datapage_offset; __kernel_get_syscall_map; +#ifndef CONFIG_PPC_BOOK3S_601 __kernel_gettimeofday; __kernel_clock_gettime; __kernel_clock_getres; + __kernel_time; __kernel_get_tbfreq; +#endif __kernel_sync_dicache; __kernel_sync_dicache_p5; __kernel_sigtramp32; @@ -155,7 +158,6 @@ VERSION #ifdef CONFIG_PPC64 __kernel_getcpu; #endif - __kernel_time; local: *; }; diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index c4b606fe73ebce..5834db0a54c66a 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -416,7 +416,7 @@ static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl, unsigned long hpa = 0; enum dma_data_direction dir = DMA_NONE; - iommu_tce_xchg(mm, tbl, entry, &hpa, &dir); + iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir); } static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm, @@ -447,7 +447,8 @@ static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm, unsigned long hpa = 0; long ret; - if (WARN_ON_ONCE(iommu_tce_xchg(kvm->mm, tbl, entry, &hpa, &dir))) + if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, + &dir))) return H_TOO_HARD; if (dir == DMA_NONE) @@ -455,7 +456,7 @@ static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm, ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry); if (ret != H_SUCCESS) - iommu_tce_xchg(kvm->mm, tbl, entry, &hpa, &dir); + iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir); return ret; } @@ -501,7 +502,7 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, if (mm_iommu_mapped_inc(mem)) return H_TOO_HARD; - ret = iommu_tce_xchg(kvm->mm, tbl, entry, &hpa, &dir); + ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir); if (WARN_ON_ONCE(ret)) { mm_iommu_mapped_dec(mem); return H_TOO_HARD; @@ -579,6 +580,8 @@ long kvmppc_h_put_tce(struct 
kvm_vcpu *vcpu, unsigned long liobn, ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl, entry, ua, dir); + iommu_tce_kill(stit->tbl, entry, 1); + if (ret != H_SUCCESS) { kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry); goto unlock_exit; @@ -656,13 +659,13 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, */ if (get_user(tce, tces + i)) { ret = H_TOO_HARD; - goto unlock_exit; + goto invalidate_exit; } tce = be64_to_cpu(tce); if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) { ret = H_PARAMETER; - goto unlock_exit; + goto invalidate_exit; } list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { @@ -673,13 +676,17 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, if (ret != H_SUCCESS) { kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry); - goto unlock_exit; + goto invalidate_exit; } } kvmppc_tce_put(stt, entry + i, tce); } +invalidate_exit: + list_for_each_entry_lockless(stit, &stt->iommu_tables, next) + iommu_tce_kill(stit->tbl, entry, npages); + unlock_exit: srcu_read_unlock(&vcpu->kvm->srcu, idx); @@ -718,7 +725,7 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu, continue; if (ret == H_TOO_HARD) - return ret; + goto invalidate_exit; WARN_ON_ONCE(1); kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry); @@ -728,6 +735,10 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu, for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift)) kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value); - return H_SUCCESS; +invalidate_exit: + list_for_each_entry_lockless(stit, &stt->iommu_tables, next) + iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages); + + return ret; } EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce); diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index b4f20f13b8604a..ab6eeb8e753e55 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -218,13 +218,14 @@ static long kvmppc_rm_ioba_validate(struct kvmppc_spapr_tce_table *stt, return H_SUCCESS; } -static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl, +static long iommu_tce_xchg_no_kill_rm(struct mm_struct *mm, + struct iommu_table *tbl, unsigned long entry, unsigned long *hpa, enum dma_data_direction *direction) { long ret; - ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction); + ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, true); if (!ret && ((*direction == DMA_FROM_DEVICE) || (*direction == DMA_BIDIRECTIONAL))) { @@ -240,13 +241,20 @@ static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl, return ret; } +extern void iommu_tce_kill_rm(struct iommu_table *tbl, + unsigned long entry, unsigned long pages) +{ + if (tbl->it_ops->tce_kill) + tbl->it_ops->tce_kill(tbl, entry, pages, true); +} + static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl, unsigned long entry) { unsigned long hpa = 0; enum dma_data_direction dir = DMA_NONE; - iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir); + iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir); } static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm, @@ -278,7 +286,7 @@ static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm, unsigned long hpa = 0; long ret; - if (iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir)) + if (iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir)) /* * real mode xchg can fail if struct page crosses * a page boundary @@ -290,7 +298,7 @@ static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm, ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry); if (ret) - 
iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir); + iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir); return ret; } @@ -336,7 +344,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem))) return H_TOO_HARD; - ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir); + ret = iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir); if (ret) { mm_iommu_mapped_dec(mem); /* @@ -417,6 +425,8 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt, stit->tbl, entry, ua, dir); + iommu_tce_kill_rm(stit->tbl, entry, 1); + if (ret != H_SUCCESS) { kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry); return ret; @@ -558,7 +568,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, ua = 0; if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) { ret = H_PARAMETER; - goto unlock_exit; + goto invalidate_exit; } list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { @@ -569,13 +579,17 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, if (ret != H_SUCCESS) { kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry); - goto unlock_exit; + goto invalidate_exit; } } kvmppc_rm_tce_put(stt, entry + i, tce); } +invalidate_exit: + list_for_each_entry_lockless(stit, &stt->iommu_tables, next) + iommu_tce_kill_rm(stit->tbl, entry, npages); + unlock_exit: if (rmap) unlock_rmap(rmap); @@ -618,7 +632,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu, continue; if (ret == H_TOO_HARD) - return ret; + goto invalidate_exit; WARN_ON_ONCE_RM(1); kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry); @@ -628,7 +642,11 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu, for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift)) kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value); - return H_SUCCESS; +invalidate_exit: + list_for_each_entry_lockless(stit, &stt->iommu_tables, next) + iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages); + + return ret; } /* This can be called in either virtual mode or real mode */ diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index f8975c620f41c4..efd8f93bc9dc12 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -5474,6 +5474,12 @@ static int kvmppc_radix_possible(void) static int kvmppc_book3s_init_hv(void) { int r; + + if (!tlbie_capable) { + pr_err("KVM-HV: Host does not support TLBIE\n"); + return -ENODEV; + } + /* * FIXME!! Do we need to check on all cpus ? 
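
The book3s_64_vio.c and book3s_64_vio_hv.c changes above split invalidation out of the per-entry exchange: every TCE is updated with iommu_tce_xchg_no_kill{,_rm}(), and a single iommu_tce_kill{,_rm}() then invalidates the whole range, so H_PUT_TCE_INDIRECT pays one invalidation per hypercall instead of one per entry. The calling pattern, reduced to a sketch (error handling and page-reference bookkeeping elided):

#include <linux/dma-direction.h>
#include <linux/mm.h>
#include <asm/iommu.h>

/* sketch: update a run of TCEs, then invalidate once for the whole range */
static long my_update_tce_range(struct mm_struct *mm, struct iommu_table *tbl,
				unsigned long entry, unsigned long npages)
{
	enum dma_data_direction dir;
	unsigned long i, hpa;
	long ret = 0;

	for (i = 0; i < npages; i++) {
		hpa = 0;
		dir = DMA_NONE;
		ret = iommu_tce_xchg_no_kill(mm, tbl, entry + i, &hpa, &dir);
		if (ret)
			break;
		/* ... drop the reference on whatever was mapped before ... */
	}

	/* one hardware invalidation for the whole range, even on early exit */
	iommu_tce_kill(tbl, entry, npages);

	return ret;
}
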
*/ diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index 735e0ac6f5b27a..fff90f2c3de210 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -398,7 +398,7 @@ static void kvmhv_flush_lpid(unsigned int lpid) long rc; if (!kvmhv_on_pseries()) { - radix__flush_tlb_lpid(lpid); + radix__flush_all_lpid(lpid); return; } @@ -411,7 +411,7 @@ static void kvmhv_flush_lpid(unsigned int lpid) void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1) { if (!kvmhv_on_pseries()) { - mmu_partition_table_set_entry(lpid, dw0, dw1); + mmu_partition_table_set_entry(lpid, dw0, dw1, true); return; } diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 07181d0dfcb79a..9a05b0d932efa5 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -29,6 +29,7 @@ #include #include #include +#include /* Sign-extend HDEC if not on POWER9 */ #define EXTEND_HDEC(reg) \ @@ -1085,16 +1086,10 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ld r5, VCPU_LR(r4) - ld r6, VCPU_CR(r4) mtlr r5 - mtcr r6 ld r1, VCPU_GPR(R1)(r4) - ld r2, VCPU_GPR(R2)(r4) - ld r3, VCPU_GPR(R3)(r4) ld r5, VCPU_GPR(R5)(r4) - ld r6, VCPU_GPR(R6)(r4) - ld r7, VCPU_GPR(R7)(r4) ld r8, VCPU_GPR(R8)(r4) ld r9, VCPU_GPR(R9)(r4) ld r10, VCPU_GPR(R10)(r4) @@ -1112,10 +1107,42 @@ BEGIN_FTR_SECTION mtspr SPRN_HDSISR, r0 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) + ld r6, VCPU_KVM(r4) + lbz r7, KVM_SECURE_GUEST(r6) + cmpdi r7, 0 + ld r6, VCPU_GPR(R6)(r4) + ld r7, VCPU_GPR(R7)(r4) + bne ret_to_ultra + + lwz r0, VCPU_CR(r4) + mtcr r0 + ld r0, VCPU_GPR(R0)(r4) + ld r2, VCPU_GPR(R2)(r4) + ld r3, VCPU_GPR(R3)(r4) ld r4, VCPU_GPR(R4)(r4) HRFI_TO_GUEST b . +/* + * Use UV_RETURN ultracall to return control back to the Ultravisor after + * processing an hypercall or interrupt that was forwarded (a.k.a. reflected) + * to the Hypervisor. 
+ * + * All registers have already been loaded, except: + * R0 = hcall result + * R2 = SRR1, so UV can detect a synthesized interrupt (if any) + * R3 = UV_RETURN + */ +ret_to_ultra: + lwz r0, VCPU_CR(r4) + mtcr r0 + + ld r0, VCPU_GPR(R3)(r4) + mfspr r2, SPRN_SRR1 + li r3, 0 + ori r3, r3, UV_RETURN + ld r4, VCPU_GPR(R4)(r4) + sc 2 /* * Enter the guest on a P9 or later system where we have exactly diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index eebc782d89a53e..b8de3be10eb473 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -16,7 +16,7 @@ CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING CFLAGS_feature-fixups.o += -DDISABLE_BRANCH_PROFILING endif -obj-y += alloc.o code-patching.o feature-fixups.o +obj-y += alloc.o code-patching.o feature-fixups.o pmem.o ifndef CONFIG_KASAN obj-y += string.o memcmp_$(BITS).o @@ -39,7 +39,7 @@ obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \ memcpy_power7.o obj64-y += copypage_64.o copyuser_64.o mem_64.o hweight_64.o \ - memcpy_64.o pmem.o + memcpy_64.o memcpy_mcsafe_64.o obj64-$(CONFIG_SMP) += locks.o obj64-$(CONFIG_ALTIVEC) += vmx-helper.o diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c index 6550b9e5ce5f84..6440d5943c0034 100644 --- a/arch/powerpc/lib/locks.c +++ b/arch/powerpc/lib/locks.c @@ -18,7 +18,7 @@ #include #include -void __spin_yield(arch_spinlock_t *lock) +void splpar_spin_yield(arch_spinlock_t *lock) { unsigned int lock_value, holder_cpu, yield_count; @@ -36,14 +36,14 @@ void __spin_yield(arch_spinlock_t *lock) plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu), yield_count); } -EXPORT_SYMBOL_GPL(__spin_yield); +EXPORT_SYMBOL_GPL(splpar_spin_yield); /* * Waiting for a read lock or a write lock on a rwlock... * This turns out to be the same for read and write locks, since * we only know the holder if it is write-locked. 
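
lib/locks.c above only renames __spin_yield()/__rw_yield() to splpar_spin_yield()/splpar_rw_yield(); the behaviour is unchanged: on a shared-processor LPAR the spinner confers its time slice to the lock holder's virtual CPU through H_CONFER instead of spinning uselessly. A rough sketch of how a slow path could use it; the loop, the feature check and the assumption that splpar_spin_yield() is declared in asm/spinlock.h are all illustrative, not taken from this patch:

#include <linux/spinlock.h>
#include <asm/firmware.h>
#include <asm/processor.h>

/* illustrative only: spin, but donate cycles to the holder under PowerVM */
static void my_lock_slowpath(arch_spinlock_t *lock)
{
	while (!arch_spin_trylock(lock)) {
		if (firmware_has_feature(FW_FEATURE_SPLPAR))
			splpar_spin_yield(lock);
		cpu_relax();
	}
}
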
*/ -void __rw_yield(arch_rwlock_t *rw) +void splpar_rw_yield(arch_rwlock_t *rw) { int lock_value; unsigned int holder_cpu, yield_count; diff --git a/arch/powerpc/lib/memcpy_mcsafe_64.S b/arch/powerpc/lib/memcpy_mcsafe_64.S new file mode 100644 index 00000000000000..cb882d9a6d8a3d --- /dev/null +++ b/arch/powerpc/lib/memcpy_mcsafe_64.S @@ -0,0 +1,242 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) IBM Corporation, 2011 + * Derived from copyuser_power7.s by Anton Blanchard + * Author - Balbir Singh + */ +#include +#include +#include + + .macro err1 +100: + EX_TABLE(100b,.Ldo_err1) + .endm + + .macro err2 +200: + EX_TABLE(200b,.Ldo_err2) + .endm + + .macro err3 +300: EX_TABLE(300b,.Ldone) + .endm + +.Ldo_err2: + ld r22,STK_REG(R22)(r1) + ld r21,STK_REG(R21)(r1) + ld r20,STK_REG(R20)(r1) + ld r19,STK_REG(R19)(r1) + ld r18,STK_REG(R18)(r1) + ld r17,STK_REG(R17)(r1) + ld r16,STK_REG(R16)(r1) + ld r15,STK_REG(R15)(r1) + ld r14,STK_REG(R14)(r1) + addi r1,r1,STACKFRAMESIZE +.Ldo_err1: + /* Do a byte by byte copy to get the exact remaining size */ + mtctr r7 +46: +err3; lbz r0,0(r4) + addi r4,r4,1 +err3; stb r0,0(r3) + addi r3,r3,1 + bdnz 46b + li r3,0 + blr + +.Ldone: + mfctr r3 + blr + + +_GLOBAL(memcpy_mcsafe) + mr r7,r5 + cmpldi r5,16 + blt .Lshort_copy + +.Lcopy: + /* Get the source 8B aligned */ + neg r6,r4 + mtocrf 0x01,r6 + clrldi r6,r6,(64-3) + + bf cr7*4+3,1f +err1; lbz r0,0(r4) + addi r4,r4,1 +err1; stb r0,0(r3) + addi r3,r3,1 + subi r7,r7,1 + +1: bf cr7*4+2,2f +err1; lhz r0,0(r4) + addi r4,r4,2 +err1; sth r0,0(r3) + addi r3,r3,2 + subi r7,r7,2 + +2: bf cr7*4+1,3f +err1; lwz r0,0(r4) + addi r4,r4,4 +err1; stw r0,0(r3) + addi r3,r3,4 + subi r7,r7,4 + +3: sub r5,r5,r6 + cmpldi r5,128 + + mflr r0 + stdu r1,-STACKFRAMESIZE(r1) + std r14,STK_REG(R14)(r1) + std r15,STK_REG(R15)(r1) + std r16,STK_REG(R16)(r1) + std r17,STK_REG(R17)(r1) + std r18,STK_REG(R18)(r1) + std r19,STK_REG(R19)(r1) + std r20,STK_REG(R20)(r1) + std r21,STK_REG(R21)(r1) + std r22,STK_REG(R22)(r1) + std r0,STACKFRAMESIZE+16(r1) + + blt 5f + srdi r6,r5,7 + mtctr r6 + + /* Now do cacheline (128B) sized loads and stores. 
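
The memcpy_mcsafe routine being added in this file returns 0 when the whole copy succeeded, or the number of bytes still uncopied if a load took a machine check (the byte-by-byte retry under .Ldo_err1 computes that exact remainder). A caller-side sketch of that contract, assuming the cross-architecture prototype unsigned long memcpy_mcsafe(void *dst, const void *src, size_t cnt); the helper name and the prototype's exact home are assumptions, not part of this patch:

#include <linux/string.h>
#include <linux/types.h>

/* sketch only: read from persistent memory, tolerating uncorrected errors */
static size_t my_read_pmem(void *dst, const void *pmem_src, size_t len)
{
	unsigned long uncopied = memcpy_mcsafe(dst, pmem_src, len);

	return len - uncopied;	/* bytes that made it before any MCE */
}
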
*/ + .align 5 +4: +err2; ld r0,0(r4) +err2; ld r6,8(r4) +err2; ld r8,16(r4) +err2; ld r9,24(r4) +err2; ld r10,32(r4) +err2; ld r11,40(r4) +err2; ld r12,48(r4) +err2; ld r14,56(r4) +err2; ld r15,64(r4) +err2; ld r16,72(r4) +err2; ld r17,80(r4) +err2; ld r18,88(r4) +err2; ld r19,96(r4) +err2; ld r20,104(r4) +err2; ld r21,112(r4) +err2; ld r22,120(r4) + addi r4,r4,128 +err2; std r0,0(r3) +err2; std r6,8(r3) +err2; std r8,16(r3) +err2; std r9,24(r3) +err2; std r10,32(r3) +err2; std r11,40(r3) +err2; std r12,48(r3) +err2; std r14,56(r3) +err2; std r15,64(r3) +err2; std r16,72(r3) +err2; std r17,80(r3) +err2; std r18,88(r3) +err2; std r19,96(r3) +err2; std r20,104(r3) +err2; std r21,112(r3) +err2; std r22,120(r3) + addi r3,r3,128 + subi r7,r7,128 + bdnz 4b + + clrldi r5,r5,(64-7) + + /* Up to 127B to go */ +5: srdi r6,r5,4 + mtocrf 0x01,r6 + +6: bf cr7*4+1,7f +err2; ld r0,0(r4) +err2; ld r6,8(r4) +err2; ld r8,16(r4) +err2; ld r9,24(r4) +err2; ld r10,32(r4) +err2; ld r11,40(r4) +err2; ld r12,48(r4) +err2; ld r14,56(r4) + addi r4,r4,64 +err2; std r0,0(r3) +err2; std r6,8(r3) +err2; std r8,16(r3) +err2; std r9,24(r3) +err2; std r10,32(r3) +err2; std r11,40(r3) +err2; std r12,48(r3) +err2; std r14,56(r3) + addi r3,r3,64 + subi r7,r7,64 + +7: ld r14,STK_REG(R14)(r1) + ld r15,STK_REG(R15)(r1) + ld r16,STK_REG(R16)(r1) + ld r17,STK_REG(R17)(r1) + ld r18,STK_REG(R18)(r1) + ld r19,STK_REG(R19)(r1) + ld r20,STK_REG(R20)(r1) + ld r21,STK_REG(R21)(r1) + ld r22,STK_REG(R22)(r1) + addi r1,r1,STACKFRAMESIZE + + /* Up to 63B to go */ + bf cr7*4+2,8f +err1; ld r0,0(r4) +err1; ld r6,8(r4) +err1; ld r8,16(r4) +err1; ld r9,24(r4) + addi r4,r4,32 +err1; std r0,0(r3) +err1; std r6,8(r3) +err1; std r8,16(r3) +err1; std r9,24(r3) + addi r3,r3,32 + subi r7,r7,32 + + /* Up to 31B to go */ +8: bf cr7*4+3,9f +err1; ld r0,0(r4) +err1; ld r6,8(r4) + addi r4,r4,16 +err1; std r0,0(r3) +err1; std r6,8(r3) + addi r3,r3,16 + subi r7,r7,16 + +9: clrldi r5,r5,(64-4) + + /* Up to 15B to go */ +.Lshort_copy: + mtocrf 0x01,r5 + bf cr7*4+0,12f +err1; lwz r0,0(r4) /* Less chance of a reject with word ops */ +err1; lwz r6,4(r4) + addi r4,r4,8 +err1; stw r0,0(r3) +err1; stw r6,4(r3) + addi r3,r3,8 + subi r7,r7,8 + +12: bf cr7*4+1,13f +err1; lwz r0,0(r4) + addi r4,r4,4 +err1; stw r0,0(r3) + addi r3,r3,4 + subi r7,r7,4 + +13: bf cr7*4+2,14f +err1; lhz r0,0(r4) + addi r4,r4,2 +err1; sth r0,0(r3) + addi r3,r3,2 + subi r7,r7,2 + +14: bf cr7*4+3,15f +err1; lbz r0,0(r4) +err1; stb r0,0(r3) + +15: li r3,0 + blr + +EXPORT_SYMBOL_GPL(memcpy_mcsafe); diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index 0f499db315d60b..5e147986400d5e 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile @@ -7,7 +7,7 @@ ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) obj-y := fault.o mem.o pgtable.o mmap.o \ init_$(BITS).o pgtable_$(BITS).o \ - pgtable-frag.o \ + pgtable-frag.o ioremap.o ioremap_$(BITS).o \ init-common.o mmu_context.o drmem.o obj-$(CONFIG_PPC_MMU_NOHASH) += nohash/ obj-$(CONFIG_PPC_BOOK3S_32) += book3s32/ diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index e249fbf6b9c344..84d5fab94f8f82 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -74,7 +74,7 @@ static int find_free_bat(void) { int b; - if (cpu_has_feature(CPU_FTR_601)) { + if (IS_ENABLED(CONFIG_PPC_BOOK3S_601)) { for (b = 0; b < 4; b++) { struct ppc_bat *bat = BATS[b]; @@ -106,7 +106,7 @@ static int find_free_bat(void) */ static unsigned int block_size(unsigned long base, unsigned long top) { - 
unsigned int max_size = (cpu_has_feature(CPU_FTR_601) ? 8 : 256) << 20; + unsigned int max_size = IS_ENABLED(CONFIG_PPC_BOOK3S_601) ? SZ_8M : SZ_256M; unsigned int base_shift = (ffs(base) - 1) & 31; unsigned int block_shift = (fls(top - base) - 1) & 31; @@ -189,7 +189,7 @@ void mmu_mark_initmem_nx(void) unsigned long top = (unsigned long)_etext - PAGE_OFFSET; unsigned long size; - if (cpu_has_feature(CPU_FTR_601)) + if (IS_ENABLED(CONFIG_PPC_BOOK3S_601)) return; for (i = 0; i < nb - 1 && base < top && top - base > (128 << 10);) { @@ -227,7 +227,7 @@ void mmu_mark_rodata_ro(void) int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4; int i; - if (cpu_has_feature(CPU_FTR_601)) + if (IS_ENABLED(CONFIG_PPC_BOOK3S_601)) return; for (i = 0; i < nb; i++) { @@ -259,7 +259,7 @@ void __init setbat(int index, unsigned long virt, phys_addr_t phys, flags &= ~_PAGE_COHERENT; bl = (size >> 17) - 1; - if (PVR_VER(mfspr(SPRN_PVR)) != 1) { + if (!IS_ENABLED(CONFIG_PPC_BOOK3S_601)) { /* 603, 604, etc. */ /* Do DBAT first */ wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE @@ -297,8 +297,7 @@ void __init setbat(int index, unsigned long virt, phys_addr_t phys, /* * Preload a translation in the hash table */ -void hash_preload(struct mm_struct *mm, unsigned long ea, - bool is_exec, unsigned long trap) +void hash_preload(struct mm_struct *mm, unsigned long ea) { pmd_t *pmd; @@ -309,6 +308,39 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, add_hash_page(mm->context.id, ea, pmd_val(*pmd)); } +/* + * This is called at the end of handling a user page fault, when the + * fault has been handled by updating a PTE in the linux page tables. + * We use it to preload an HPTE into the hash table corresponding to + * the updated linux PTE. + * + * This must always be called with the pte lock held. + */ +void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, + pte_t *ptep) +{ + if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) + return; + /* + * We don't need to worry about _PAGE_PRESENT here because we are + * called with either mm->page_table_lock held or ptl lock held + */ + + /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */ + if (!pte_young(*ptep) || address >= TASK_SIZE) + return; + + /* We have to test for regs NULL since init will get here first thing at boot */ + if (!current->thread.regs) + return; + + /* We also avoid filling the hash if not coming from a fault */ + if (TRAP(current->thread.regs) != 0x300 && TRAP(current->thread.regs) != 0x400) + return; + + hash_preload(vma->vm_mm, address); +} + /* * Initialize the hash table and patch the instructions in hashtable.S. */ @@ -358,6 +390,15 @@ void __init MMU_init_hw(void) hash_mb2 = hash_mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg; if (lg_n_hpteg > 16) hash_mb2 = 16 - LG_HPTEG_SIZE; + + /* + * When KASAN is selected, there is already an early temporary hash + * table and the switch to the final hash table is done later. 
+ */ + if (IS_ENABLED(CONFIG_KASAN)) + return; + + MMU_init_hw_patch(); } void __init MMU_init_hw_patch(void) @@ -400,7 +441,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base, BUG_ON(first_memblock_base != 0); /* 601 can only access 16MB at the moment */ - if (PVR_VER(mfspr(SPRN_PVR)) == 1) + if (IS_ENABLED(CONFIG_PPC_BOOK3S_601)) memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01000000)); else /* Anything else has 256M mapped */ memblock_set_current_limit(min_t(u64, first_memblock_size, 0x10000000)); @@ -418,9 +459,6 @@ void __init setup_kuep(bool disabled) { pr_info("Activating Kernel Userspace Execution Prevention\n"); - if (cpu_has_feature(CPU_FTR_601)) - pr_warn("KUEP is not working on powerpc 601 (No NX bit in Seg Regs)\n"); - if (disabled) pr_warn("KUEP cannot be disabled yet on 6xx when compiled in\n"); } diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index b8ad14bb11704e..3410ea9f4de1c1 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include @@ -61,6 +62,7 @@ #include #include #include +#include #include @@ -271,10 +273,6 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend, if (overlaps_kernel_text(vaddr, vaddr + step)) tprot &= ~HPTE_R_N; - /* Make kvm guest trampolines executable */ - if (overlaps_kvm_tmp(vaddr, vaddr + step)) - tprot &= ~HPTE_R_N; - /* * If relocatable, check if it overlaps interrupt vectors that * are copied down to real 0. For relocatable kernel @@ -823,7 +821,7 @@ static void __init hash_init_partition_table(phys_addr_t hash_table, * For now, UPRT is 0 and we have no segment table. */ htab_size = __ilog2(htab_size) - 18; - mmu_partition_table_set_entry(0, hash_table | htab_size, 0); + mmu_partition_table_set_entry(0, hash_table | htab_size, 0, false); pr_info("Partition table %p\n", partition_tb); } @@ -857,12 +855,6 @@ static void __init htab_initialize(void) /* Using a hypervisor which owns the htab */ htab_address = NULL; _SDR1 = 0; - /* - * On POWER9, we need to do a H_REGISTER_PROC_TBL hcall - * to inform the hypervisor that we wish to use the HPT. 
- */ - if (cpu_has_feature(CPU_FTR_ARCH_300)) - register_process_table(0, 0, 0); #ifdef CONFIG_FA_DUMP /* * If firmware assisted dump is active firmware preserves @@ -1075,8 +1067,8 @@ void hash__early_init_mmu_secondary(void) if (!cpu_has_feature(CPU_FTR_ARCH_300)) mtspr(SPRN_SDR1, _SDR1); else - mtspr(SPRN_PTCR, - __pa(partition_tb) | (PATB_SIZE_SHIFT - 12)); + set_ptcr_when_no_uv(__pa(partition_tb) | + (PATB_SIZE_SHIFT - 12)); } /* Initialize SLB */ slb_initialize(); @@ -1460,8 +1452,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap, } EXPORT_SYMBOL_GPL(hash_page); -int __hash_page(unsigned long ea, unsigned long msr, unsigned long trap, - unsigned long dsisr) +int __hash_page(unsigned long trap, unsigned long ea, unsigned long dsisr, + unsigned long msr) { unsigned long access = _PAGE_PRESENT | _PAGE_READ; unsigned long flags = 0; @@ -1518,8 +1510,8 @@ static bool should_hash_preload(struct mm_struct *mm, unsigned long ea) } #endif -void hash_preload(struct mm_struct *mm, unsigned long ea, - bool is_exec, unsigned long trap) +static void hash_preload(struct mm_struct *mm, unsigned long ea, + bool is_exec, unsigned long trap) { int hugepage_shift; unsigned long vsid; @@ -1599,6 +1591,57 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, local_irq_restore(flags); } +/* + * This is called at the end of handling a user page fault, when the + * fault has been handled by updating a PTE in the linux page tables. + * We use it to preload an HPTE into the hash table corresponding to + * the updated linux PTE. + * + * This must always be called with the pte lock held. + */ +void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, + pte_t *ptep) +{ + /* + * We don't need to worry about _PAGE_PRESENT here because we are + * called with either mm->page_table_lock held or ptl lock held + */ + unsigned long trap; + bool is_exec; + + if (radix_enabled()) { + prefetch((void *)address); + return; + } + + /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */ + if (!pte_young(*ptep) || address >= TASK_SIZE) + return; + + /* + * We try to figure out if we are coming from an instruction + * access fault and pass that down to __hash_page so we avoid + * double-faulting on execution of fresh text. We have to test + * for regs NULL since init will get here first thing at boot. + * + * We also avoid filling the hash if not coming from a fault. + */ + + trap = current->thread.regs ? 
TRAP(current->thread.regs) : 0UL; + switch (trap) { + case 0x300: + is_exec = false; + break; + case 0x400: + is_exec = true; + break; + default: + return; + } + + hash_preload(vma->vm_mm, address, is_exec, trap); +} + #ifdef CONFIG_PPC_MEM_KEYS /* * Return the protection key associated with the given address and the @@ -1931,10 +1974,16 @@ static int hpt_order_get(void *data, u64 *val) static int hpt_order_set(void *data, u64 val) { + int ret; + if (!mmu_hash_ops.resize_hpt) return -ENODEV; - return mmu_hash_ops.resize_hpt(val); + cpus_read_lock(); + ret = mmu_hash_ops.resize_hpt(val); + cpus_read_unlock(); + + return ret; } DEFINE_DEBUGFS_ATTRIBUTE(fops_hpt_order, hpt_order_get, hpt_order_set, "%llu\n"); @@ -1957,7 +2006,4 @@ void __init print_system_hash_info(void) if (htab_hash_mask) pr_info("htab_hash_mask = 0x%lx\n", htab_hash_mask); - pr_info("kernel vmalloc start = 0x%lx\n", KERN_VIRT_START); - pr_info("kernel IO start = 0x%lx\n", KERN_IO_START); - pr_info("kernel vmemmap start = 0x%lx\n", (unsigned long)vmemmap); } diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c index 7d0e0d0d22c405..75483b40fcb13a 100644 --- a/arch/powerpc/mm/book3s64/pgtable.c +++ b/arch/powerpc/mm/book3s64/pgtable.c @@ -8,10 +8,13 @@ #include #include +#include #include #include #include #include +#include +#include #include #include @@ -21,9 +24,6 @@ EXPORT_SYMBOL(__pmd_frag_nr); unsigned long __pmd_frag_size_shift; EXPORT_SYMBOL(__pmd_frag_size_shift); -int (*register_process_table)(unsigned long base, unsigned long page_size, - unsigned long tbl_size); - #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* * This is called when relaxing access to a hugepage. It's also called in the page @@ -205,37 +205,61 @@ void __init mmu_partition_table_init(void) * 64 K size. */ ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12); - mtspr(SPRN_PTCR, ptcr); + set_ptcr_when_no_uv(ptcr); powernv_set_nmmu_ptcr(ptcr); } +static void flush_partition(unsigned int lpid, bool radix) +{ + if (radix) { + radix__flush_all_lpid(lpid); + radix__flush_all_lpid_guest(lpid); + } else { + asm volatile("ptesync" : : : "memory"); + asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : : + "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); + /* do we need fixup here ?*/ + asm volatile("eieio; tlbsync; ptesync" : : : "memory"); + trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0); + } +} + void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0, - unsigned long dw1) + unsigned long dw1, bool flush) { unsigned long old = be64_to_cpu(partition_tb[lpid].patb0); + /* + * When ultravisor is enabled, the partition table is stored in secure + * memory and can only be accessed doing an ultravisor call. However, we + * maintain a copy of the partition table in normal memory to allow Nest + * MMU translations to occur (for normal VMs). + * + * Therefore, here we always update partition_tb, regardless of whether + * we are running under an ultravisor or not. + */ partition_tb[lpid].patb0 = cpu_to_be64(dw0); partition_tb[lpid].patb1 = cpu_to_be64(dw1); /* - * Global flush of TLBs and partition table caches for this lpid. - * The type of flush (hash or radix) depends on what the previous - * use of this partition ID was, not the new use. + * If ultravisor is enabled, we do an ultravisor call to register the + * partition table entry (PATE), which also do a global flush of TLBs + * and partition table caches for the lpid. Otherwise, just do the + * flush. 
The type of flush (hash or radix) depends on what the previous + * use of the partition ID was, not the new use. */ - asm volatile("ptesync" : : : "memory"); - if (old & PATB_HR) { - asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : : - "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); - asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : : - "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); - trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1); - } else { - asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : : - "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); - trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0); + if (firmware_has_feature(FW_FEATURE_ULTRAVISOR)) { + uv_register_pate(lpid, dw0, dw1); + pr_info("PATE registered by ultravisor: dw0 = 0x%lx, dw1 = 0x%lx\n", + dw0, dw1); + } else if (flush) { + /* + * Boot does not need to flush, because MMU is off and each + * CPU does a tlbiel_all() before switching them on, which + * flushes everything. + */ + flush_partition(lpid, (old & PATB_HR)); } - /* do we need fixup here ?*/ - asm volatile("eieio; tlbsync; ptesync" : : : "memory"); } EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry); @@ -447,23 +471,48 @@ int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, return true; } -int ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot, int nid) -{ - unsigned long i; +/* + * Does the CPU support tlbie? + */ +bool tlbie_capable __read_mostly = true; +EXPORT_SYMBOL(tlbie_capable); - if (radix_enabled()) - return radix__ioremap_range(ea, pa, size, prot, nid); - - for (i = 0; i < size; i += PAGE_SIZE) { - int err = map_kernel_page(ea + i, pa + i, prot); - if (err) { - if (slab_is_available()) - unmap_kernel_range(ea, size); - else - WARN_ON_ONCE(1); /* Should clean up */ - return err; - } +/* + * Should tlbie be used for management of CPU TLBs, for kernel and process + * address spaces? tlbie may still be used for nMMU accelerators, and for KVM + * guest address spaces. + */ +bool tlbie_enabled __read_mostly = true; + +static int __init setup_disable_tlbie(char *str) +{ + if (!radix_enabled()) { + pr_err("disable_tlbie: Unable to disable TLBIE with Hash MMU.\n"); + return 1; } + tlbie_capable = false; + tlbie_enabled = false; + + return 1; +} +__setup("disable_tlbie", setup_disable_tlbie); + +static int __init pgtable_debugfs_setup(void) +{ + if (!tlbie_capable) + return 0; + + /* + * There is no locking vs tlb flushing when changing this value. + * The tlb flushers will see one value or another, and use either + * tlbie or tlbiel with IPIs. In both cases the TLBs will be + * invalidated as expected. 
+ */ + debugfs_create_bool("tlbie_enabled", 0600, + powerpc_debugfs_root, + &tlbie_enabled); + return 0; } +arch_initcall(pgtable_debugfs_setup); diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index b4ca9e95e67811..3a1fbf9cb8f86e 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -27,25 +27,13 @@ #include #include #include +#include #include unsigned int mmu_pid_bits; unsigned int mmu_base_pid; -static int native_register_process_table(unsigned long base, unsigned long pg_sz, - unsigned long table_size) -{ - unsigned long patb0, patb1; - - patb0 = be64_to_cpu(partition_tb[0].patb0); - patb1 = base | table_size | PATB_GR; - - mmu_partition_table_set_entry(0, patb0, patb1); - - return 0; -} - static __ref void *early_alloc_pgtable(unsigned long size, int nid, unsigned long region_start, unsigned long region_end) { @@ -380,18 +368,6 @@ static void __init radix_init_pgtable(void) */ rts_field = radix__get_tree_size(); process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE); - /* - * Fill in the partition table. We are suppose to use effective address - * of process table here. But our linear mapping also enable us to use - * physical address here. - */ - register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12); - pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd); - asm volatile("ptesync" : : : "memory"); - asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : : - "r" (TLBIEL_INVAL_SET_LPID), "r" (0)); - asm volatile("eieio; tlbsync; ptesync" : : : "memory"); - trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1); /* * The init_mm context is given the first available (non-zero) PID, @@ -412,20 +388,15 @@ static void __init radix_init_pgtable(void) static void __init radix_init_partition_table(void) { - unsigned long rts_field, dw0; + unsigned long rts_field, dw0, dw1; mmu_partition_table_init(); rts_field = radix__get_tree_size(); dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR; - mmu_partition_table_set_entry(0, dw0, 0); + dw1 = __pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR; + mmu_partition_table_set_entry(0, dw0, dw1, false); pr_info("Initializing Radix MMU\n"); - pr_info("Partition table %p\n", partition_tb); -} - -void __init radix_init_native(void) -{ - register_process_table = native_register_process_table; } static int __init get_idx_from_shift(unsigned int shift) @@ -621,8 +592,9 @@ void __init radix__early_init_mmu(void) __pmd_frag_nr = RADIX_PMD_FRAG_NR; __pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT; + radix_init_pgtable(); + if (!firmware_has_feature(FW_FEATURE_LPAR)) { - radix_init_native(); lpcr = mfspr(SPRN_LPCR); mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR); radix_init_partition_table(); @@ -633,11 +605,9 @@ void __init radix__early_init_mmu(void) memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE); - radix_init_pgtable(); /* Switch to the guard PID before turning on MMU */ radix__switch_mmu_context(NULL, &init_mm); - if (cpu_has_feature(CPU_FTR_HVMODE)) - tlbiel_all(); + tlbiel_all(); } void radix__early_init_mmu_secondary(void) @@ -650,14 +620,14 @@ void radix__early_init_mmu_secondary(void) lpcr = mfspr(SPRN_LPCR); mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR); - mtspr(SPRN_PTCR, - __pa(partition_tb) | (PATB_SIZE_SHIFT - 12)); + set_ptcr_when_no_uv(__pa(partition_tb) | + (PATB_SIZE_SHIFT - 12)); + radix_init_amor(); } radix__switch_mmu_context(NULL, &init_mm); - if 
(cpu_has_feature(CPU_FTR_HVMODE)) - tlbiel_all(); + tlbiel_all(); } void radix__mmu_cleanup_all(void) @@ -667,7 +637,7 @@ void radix__mmu_cleanup_all(void) if (!firmware_has_feature(FW_FEATURE_LPAR)) { lpcr = mfspr(SPRN_LPCR); mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT); - mtspr(SPRN_PTCR, 0); + set_ptcr_when_no_uv(0); powernv_set_nmmu_ptcr(0); radix__flush_tlb_all(); } @@ -737,8 +707,8 @@ static int __meminit stop_machine_change_mapping(void *data) spin_unlock(&init_mm.page_table_lock); pte_clear(&init_mm, params->aligned_start, params->pte); - create_physical_mapping(params->aligned_start, params->start, -1); - create_physical_mapping(params->end, params->aligned_end, -1); + create_physical_mapping(__pa(params->aligned_start), __pa(params->start), -1); + create_physical_mapping(__pa(params->end), __pa(params->aligned_end), -1); spin_lock(&init_mm.page_table_lock); return 0; } @@ -902,7 +872,7 @@ int __meminit radix__create_section_mapping(unsigned long start, unsigned long e return -1; } - return create_physical_mapping(start, end, nid); + return create_physical_mapping(__pa(start), __pa(end), nid); } int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end) @@ -1218,26 +1188,6 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) return 1; } -int radix__ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, - pgprot_t prot, int nid) -{ - if (likely(slab_is_available())) { - int err = ioremap_page_range(ea, ea + size, pa, prot); - if (err) - unmap_kernel_range(ea, size); - return err; - } else { - unsigned long i; - - for (i = 0; i < size; i += PAGE_SIZE) { - int err = map_kernel_page(ea + i, pa + i, prot); - if (WARN_ON_ONCE(err)) /* Should clean up */ - return err; - } - return 0; - } -} - int __init arch_ioremap_p4d_supported(void) { return 0; diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c index 71f7fede2fa477..631be42abd3306 100644 --- a/arch/powerpc/mm/book3s64/radix_tlb.c +++ b/arch/powerpc/mm/book3s64/radix_tlb.c @@ -51,11 +51,15 @@ static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is) * and partition table entries. Then flush the remaining sets of the * TLB. */ - tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0); - for (set = 1; set < num_sets; set++) - tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 0); - /* Do the same for process scoped entries. */ + if (early_cpu_has_feature(CPU_FTR_HVMODE)) { + /* MSR[HV] should flush partition scope translations first. */ + tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0); + for (set = 1; set < num_sets; set++) + tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 0); + } + + /* Flush process scoped entries. 
*/ tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1); for (set = 1; set < num_sets; set++) tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1); @@ -116,22 +120,6 @@ static __always_inline void __tlbie_pid(unsigned long pid, unsigned long ric) trace_tlbie(0, 0, rb, rs, ric, prs, r); } -static __always_inline void __tlbiel_lpid(unsigned long lpid, int set, - unsigned long ric) -{ - unsigned long rb,rs,prs,r; - - rb = PPC_BIT(52); /* IS = 2 */ - rb |= set << PPC_BITLSHIFT(51); - rs = 0; /* LPID comes from LPIDR */ - prs = 0; /* partition scoped */ - r = 1; /* radix format */ - - asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) - : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); - trace_tlbie(lpid, 1, rb, rs, ric, prs, r); -} - static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric) { unsigned long rb,rs,prs,r; @@ -146,23 +134,20 @@ static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric) trace_tlbie(lpid, 0, rb, rs, ric, prs, r); } -static __always_inline void __tlbiel_lpid_guest(unsigned long lpid, int set, - unsigned long ric) +static __always_inline void __tlbie_lpid_guest(unsigned long lpid, unsigned long ric) { unsigned long rb,rs,prs,r; rb = PPC_BIT(52); /* IS = 2 */ - rb |= set << PPC_BITLSHIFT(51); - rs = 0; /* LPID comes from LPIDR */ + rs = lpid; prs = 1; /* process scoped */ r = 1; /* radix format */ - asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) + asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); - trace_tlbie(lpid, 1, rb, rs, ric, prs, r); + trace_tlbie(lpid, 0, rb, rs, ric, prs, r); } - static __always_inline void __tlbiel_va(unsigned long va, unsigned long pid, unsigned long ap, unsigned long ric) { @@ -285,32 +270,37 @@ static inline void _tlbie_pid(unsigned long pid, unsigned long ric) asm volatile("eieio; tlbsync; ptesync": : :"memory"); } -static inline void _tlbiel_lpid(unsigned long lpid, unsigned long ric) +struct tlbiel_pid { + unsigned long pid; + unsigned long ric; +}; + +static void do_tlbiel_pid(void *info) { - int set; + struct tlbiel_pid *t = info; - VM_BUG_ON(mfspr(SPRN_LPID) != lpid); + if (t->ric == RIC_FLUSH_TLB) + _tlbiel_pid(t->pid, RIC_FLUSH_TLB); + else if (t->ric == RIC_FLUSH_PWC) + _tlbiel_pid(t->pid, RIC_FLUSH_PWC); + else + _tlbiel_pid(t->pid, RIC_FLUSH_ALL); +} - asm volatile("ptesync": : :"memory"); +static inline void _tlbiel_pid_multicast(struct mm_struct *mm, + unsigned long pid, unsigned long ric) +{ + struct cpumask *cpus = mm_cpumask(mm); + struct tlbiel_pid t = { .pid = pid, .ric = ric }; + on_each_cpu_mask(cpus, do_tlbiel_pid, &t, 1); /* - * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL, - * also flush the entire Page Walk Cache. + * Always want the CPU translations to be invalidated with tlbiel in + * these paths, so while coprocessors must use tlbie, we can not + * optimise away the tlbiel component. 
*/ - __tlbiel_lpid(lpid, 0, ric); - - /* For PWC, only one flush is needed */ - if (ric == RIC_FLUSH_PWC) { - asm volatile("ptesync": : :"memory"); - return; - } - - /* For the remaining sets, just flush the TLB */ - for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++) - __tlbiel_lpid(lpid, set, RIC_FLUSH_TLB); - - asm volatile("ptesync": : :"memory"); - asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST "; isync" : : :"memory"); + if (atomic_read(&mm->context.copros) > 0) + _tlbie_pid(pid, RIC_FLUSH_ALL); } static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric) @@ -337,35 +327,28 @@ static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric) asm volatile("eieio; tlbsync; ptesync": : :"memory"); } -static __always_inline void _tlbiel_lpid_guest(unsigned long lpid, unsigned long ric) +static __always_inline void _tlbie_lpid_guest(unsigned long lpid, unsigned long ric) { - int set; - - VM_BUG_ON(mfspr(SPRN_LPID) != lpid); - - asm volatile("ptesync": : :"memory"); - /* - * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL, - * also flush the entire Page Walk Cache. + * Workaround the fact that the "ric" argument to __tlbie_pid + * must be a compile-time contraint to match the "i" constraint + * in the asm statement. */ - __tlbiel_lpid_guest(lpid, 0, ric); - - /* For PWC, only one flush is needed */ - if (ric == RIC_FLUSH_PWC) { - asm volatile("ptesync": : :"memory"); - return; + switch (ric) { + case RIC_FLUSH_TLB: + __tlbie_lpid_guest(lpid, RIC_FLUSH_TLB); + break; + case RIC_FLUSH_PWC: + __tlbie_lpid_guest(lpid, RIC_FLUSH_PWC); + break; + case RIC_FLUSH_ALL: + default: + __tlbie_lpid_guest(lpid, RIC_FLUSH_ALL); } - - /* For the remaining sets, just flush the TLB */ - for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++) - __tlbiel_lpid_guest(lpid, set, RIC_FLUSH_TLB); - - asm volatile("ptesync": : :"memory"); - asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory"); + fixup_tlbie_lpid(lpid); + asm volatile("eieio; tlbsync; ptesync": : :"memory"); } - static inline void __tlbiel_va_range(unsigned long start, unsigned long end, unsigned long pid, unsigned long page_size, unsigned long psize) @@ -420,6 +403,53 @@ static __always_inline void _tlbie_va(unsigned long va, unsigned long pid, asm volatile("eieio; tlbsync; ptesync": : :"memory"); } +struct tlbiel_va { + unsigned long pid; + unsigned long va; + unsigned long psize; + unsigned long ric; +}; + +static void do_tlbiel_va(void *info) +{ + struct tlbiel_va *t = info; + + if (t->ric == RIC_FLUSH_TLB) + _tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_TLB); + else if (t->ric == RIC_FLUSH_PWC) + _tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_PWC); + else + _tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_ALL); +} + +static inline void _tlbiel_va_multicast(struct mm_struct *mm, + unsigned long va, unsigned long pid, + unsigned long psize, unsigned long ric) +{ + struct cpumask *cpus = mm_cpumask(mm); + struct tlbiel_va t = { .va = va, .pid = pid, .psize = psize, .ric = ric }; + on_each_cpu_mask(cpus, do_tlbiel_va, &t, 1); + if (atomic_read(&mm->context.copros) > 0) + _tlbie_va(va, pid, psize, RIC_FLUSH_TLB); +} + +struct tlbiel_va_range { + unsigned long pid; + unsigned long start; + unsigned long end; + unsigned long page_size; + unsigned long psize; + bool also_pwc; +}; + +static void do_tlbiel_va_range(void *info) +{ + struct tlbiel_va_range *t = info; + + _tlbiel_va_range(t->start, t->end, t->pid, t->page_size, + t->psize, t->also_pwc); +} + static __always_inline void _tlbie_lpid_va(unsigned long va, unsigned 
long lpid, unsigned long psize, unsigned long ric) { @@ -443,6 +473,21 @@ static inline void _tlbie_va_range(unsigned long start, unsigned long end, asm volatile("eieio; tlbsync; ptesync": : :"memory"); } +static inline void _tlbiel_va_range_multicast(struct mm_struct *mm, + unsigned long start, unsigned long end, + unsigned long pid, unsigned long page_size, + unsigned long psize, bool also_pwc) +{ + struct cpumask *cpus = mm_cpumask(mm); + struct tlbiel_va_range t = { .start = start, .end = end, + .pid = pid, .page_size = page_size, + .psize = psize, .also_pwc = also_pwc }; + + on_each_cpu_mask(cpus, do_tlbiel_va_range, &t, 1); + if (atomic_read(&mm->context.copros) > 0) + _tlbie_va_range(start, end, pid, page_size, psize, also_pwc); +} + /* * Base TLB flushing operations: * @@ -580,10 +625,14 @@ void radix__flush_tlb_mm(struct mm_struct *mm) goto local; } - if (mm_needs_flush_escalation(mm)) - _tlbie_pid(pid, RIC_FLUSH_ALL); - else - _tlbie_pid(pid, RIC_FLUSH_TLB); + if (cputlb_use_tlbie()) { + if (mm_needs_flush_escalation(mm)) + _tlbie_pid(pid, RIC_FLUSH_ALL); + else + _tlbie_pid(pid, RIC_FLUSH_TLB); + } else { + _tlbiel_pid_multicast(mm, pid, RIC_FLUSH_TLB); + } } else { local: _tlbiel_pid(pid, RIC_FLUSH_TLB); @@ -609,7 +658,10 @@ static void __flush_all_mm(struct mm_struct *mm, bool fullmm) goto local; } } - _tlbie_pid(pid, RIC_FLUSH_ALL); + if (cputlb_use_tlbie()) + _tlbie_pid(pid, RIC_FLUSH_ALL); + else + _tlbiel_pid_multicast(mm, pid, RIC_FLUSH_ALL); } else { local: _tlbiel_pid(pid, RIC_FLUSH_ALL); @@ -644,7 +696,10 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, exit_flush_lazy_tlbs(mm); goto local; } - _tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB); + if (cputlb_use_tlbie()) + _tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB); + else + _tlbiel_va_multicast(mm, vmaddr, pid, psize, RIC_FLUSH_TLB); } else { local: _tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB); @@ -666,6 +721,24 @@ EXPORT_SYMBOL(radix__flush_tlb_page); #define radix__flush_all_mm radix__local_flush_all_mm #endif /* CONFIG_SMP */ +static void do_tlbiel_kernel(void *info) +{ + _tlbiel_pid(0, RIC_FLUSH_ALL); +} + +static inline void _tlbiel_kernel_broadcast(void) +{ + on_each_cpu(do_tlbiel_kernel, NULL, 1); + if (tlbie_capable) { + /* + * Coherent accelerators don't refcount kernel memory mappings, + * so have to always issue a tlbie for them. This is quite a + * slow path anyway. 
+ */ + _tlbie_pid(0, RIC_FLUSH_ALL); + } +} + /* * If kernel TLBIs ever become local rather than global, then * drivers/misc/ocxl/link.c:ocxl_link_add_pe will need some work, as it @@ -673,7 +746,10 @@ EXPORT_SYMBOL(radix__flush_tlb_page); */ void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end) { - _tlbie_pid(0, RIC_FLUSH_ALL); + if (cputlb_use_tlbie()) + _tlbie_pid(0, RIC_FLUSH_ALL); + else + _tlbiel_kernel_broadcast(); } EXPORT_SYMBOL(radix__flush_tlb_kernel_range); @@ -729,10 +805,14 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm, if (local) { _tlbiel_pid(pid, RIC_FLUSH_TLB); } else { - if (mm_needs_flush_escalation(mm)) - _tlbie_pid(pid, RIC_FLUSH_ALL); - else - _tlbie_pid(pid, RIC_FLUSH_TLB); + if (cputlb_use_tlbie()) { + if (mm_needs_flush_escalation(mm)) + _tlbie_pid(pid, RIC_FLUSH_ALL); + else + _tlbie_pid(pid, RIC_FLUSH_TLB); + } else { + _tlbiel_pid_multicast(mm, pid, RIC_FLUSH_TLB); + } } } else { bool hflush = flush_all_sizes; @@ -757,8 +837,8 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm, gflush = false; } - asm volatile("ptesync": : :"memory"); if (local) { + asm volatile("ptesync": : :"memory"); __tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize); if (hflush) __tlbiel_va_range(hstart, hend, pid, @@ -767,7 +847,8 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm, __tlbiel_va_range(gstart, gend, pid, PUD_SIZE, MMU_PAGE_1G); asm volatile("ptesync": : :"memory"); - } else { + } else if (cputlb_use_tlbie()) { + asm volatile("ptesync": : :"memory"); __tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize); if (hflush) __tlbie_va_range(hstart, hend, pid, @@ -777,6 +858,15 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm, PUD_SIZE, MMU_PAGE_1G); fixup_tlbie(); asm volatile("eieio; tlbsync; ptesync": : :"memory"); + } else { + _tlbiel_va_range_multicast(mm, + start, end, pid, page_size, mmu_virtual_psize, false); + if (hflush) + _tlbiel_va_range_multicast(mm, + hstart, hend, pid, PMD_SIZE, MMU_PAGE_2M, false); + if (gflush) + _tlbiel_va_range_multicast(mm, + gstart, gend, pid, PUD_SIZE, MMU_PAGE_1G, false); } } preempt_enable(); @@ -835,32 +925,19 @@ EXPORT_SYMBOL_GPL(radix__flush_pwc_lpid); /* * Flush partition scoped translations from LPID (=LPIDR) */ -void radix__flush_tlb_lpid(unsigned int lpid) +void radix__flush_all_lpid(unsigned int lpid) { _tlbie_lpid(lpid, RIC_FLUSH_ALL); } -EXPORT_SYMBOL_GPL(radix__flush_tlb_lpid); +EXPORT_SYMBOL_GPL(radix__flush_all_lpid); /* - * Flush partition scoped translations from LPID (=LPIDR) + * Flush process scoped translations from LPID (=LPIDR) */ -void radix__local_flush_tlb_lpid(unsigned int lpid) +void radix__flush_all_lpid_guest(unsigned int lpid) { - _tlbiel_lpid(lpid, RIC_FLUSH_ALL); + _tlbie_lpid_guest(lpid, RIC_FLUSH_ALL); } -EXPORT_SYMBOL_GPL(radix__local_flush_tlb_lpid); - -/* - * Flush process scoped translations from LPID (=LPIDR). - * Important difference, the guest normally manages its own translations, - * but some cases e.g., vCPU CPU migration require KVM to flush. - */ -void radix__local_flush_tlb_lpid_guest(unsigned int lpid) -{ - _tlbiel_lpid_guest(lpid, RIC_FLUSH_ALL); -} -EXPORT_SYMBOL_GPL(radix__local_flush_tlb_lpid_guest); - static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start, unsigned long end, int psize); @@ -966,16 +1043,26 @@ static __always_inline void __radix__flush_tlb_range_psize(struct mm_struct *mm, if (local) { _tlbiel_pid(pid, also_pwc ? 
RIC_FLUSH_ALL : RIC_FLUSH_TLB); } else { - if (mm_needs_flush_escalation(mm)) - also_pwc = true; + if (cputlb_use_tlbie()) { + if (mm_needs_flush_escalation(mm)) + also_pwc = true; + + _tlbie_pid(pid, + also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB); + } else { + _tlbiel_pid_multicast(mm, pid, + also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB); + } - _tlbie_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB); } } else { if (local) _tlbiel_va_range(start, end, pid, page_size, psize, also_pwc); - else + else if (cputlb_use_tlbie()) _tlbie_va_range(start, end, pid, page_size, psize, also_pwc); + else + _tlbiel_va_range_multicast(mm, + start, end, pid, page_size, psize, also_pwc); } preempt_enable(); } @@ -1017,7 +1104,11 @@ void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr) exit_flush_lazy_tlbs(mm); goto local; } - _tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true); + if (cputlb_use_tlbie()) + _tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true); + else + _tlbiel_va_range_multicast(mm, + addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true); } else { local: _tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true); diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c index c617282d5b2aea..2a82984356f81f 100644 --- a/arch/powerpc/mm/dma-noncoherent.c +++ b/arch/powerpc/mm/dma-noncoherent.c @@ -4,310 +4,18 @@ * Copyright (C) 2001 Dan Malek (dmalek@jlc.net) * * Copyright (C) 2000 Russell King - * - * Consistent memory allocators. Used for DMA devices that want to - * share uncached memory with the processor core. The function return - * is the virtual address and 'dma_handle' is the physical address. - * Mostly stolen from the ARM port, with some changes for PowerPC. - * -- Dan - * - * Reorganized to get rid of the arch-specific consistent_* functions - * and provide non-coherent implementations for the DMA API. -Matt - * - * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent() - * implementation. This is pulled straight from ARM and barely - * modified. -Matt */ -#include -#include #include #include -#include #include #include #include #include -#include #include #include -#include - -/* - * This address range defaults to a value that is safe for all - * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It - * can be further configured for specific applications under - * the "Advanced Setup" menu. -Matt - */ -#define CONSISTENT_BASE (IOREMAP_TOP) -#define CONSISTENT_END (CONSISTENT_BASE + CONFIG_CONSISTENT_SIZE) -#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT) - -/* - * This is the page table (2MB) covering uncached, DMA consistent allocations - */ -static DEFINE_SPINLOCK(consistent_lock); - -/* - * VM region handling support. - * - * This should become something generic, handling VM region allocations for - * vmalloc and similar (ioremap, module space, etc). 
- * - * I envisage vmalloc()'s supporting vm_struct becoming: - * - * struct vm_struct { - * struct vm_region region; - * unsigned long flags; - * struct page **pages; - * unsigned int nr_pages; - * unsigned long phys_addr; - * }; - * - * get_vm_area() would then call vm_region_alloc with an appropriate - * struct vm_region head (eg): - * - * struct vm_region vmalloc_head = { - * .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list), - * .vm_start = VMALLOC_START, - * .vm_end = VMALLOC_END, - * }; - * - * However, vmalloc_head.vm_start is variable (typically, it is dependent on - * the amount of RAM found at boot time.) I would imagine that get_vm_area() - * would have to initialise this each time prior to calling vm_region_alloc(). - */ -struct ppc_vm_region { - struct list_head vm_list; - unsigned long vm_start; - unsigned long vm_end; -}; - -static struct ppc_vm_region consistent_head = { - .vm_list = LIST_HEAD_INIT(consistent_head.vm_list), - .vm_start = CONSISTENT_BASE, - .vm_end = CONSISTENT_END, -}; - -static struct ppc_vm_region * -ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp) -{ - unsigned long addr = head->vm_start, end = head->vm_end - size; - unsigned long flags; - struct ppc_vm_region *c, *new; - - new = kmalloc(sizeof(struct ppc_vm_region), gfp); - if (!new) - goto out; - - spin_lock_irqsave(&consistent_lock, flags); - - list_for_each_entry(c, &head->vm_list, vm_list) { - if ((addr + size) < addr) - goto nospc; - if ((addr + size) <= c->vm_start) - goto found; - addr = c->vm_end; - if (addr > end) - goto nospc; - } - - found: - /* - * Insert this entry _before_ the one we found. - */ - list_add_tail(&new->vm_list, &c->vm_list); - new->vm_start = addr; - new->vm_end = addr + size; - - spin_unlock_irqrestore(&consistent_lock, flags); - return new; - - nospc: - spin_unlock_irqrestore(&consistent_lock, flags); - kfree(new); - out: - return NULL; -} - -static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr) -{ - struct ppc_vm_region *c; - - list_for_each_entry(c, &head->vm_list, vm_list) { - if (c->vm_start == addr) - goto out; - } - c = NULL; - out: - return c; -} - -/* - * Allocate DMA-coherent memory space and return both the kernel remapped - * virtual and bus address for that space. - */ -void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t gfp, unsigned long attrs) -{ - struct page *page; - struct ppc_vm_region *c; - unsigned long order; - u64 mask = ISA_DMA_THRESHOLD, limit; - - if (dev) { - mask = dev->coherent_dma_mask; - - /* - * Sanity check the DMA mask - it must be non-zero, and - * must be able to be satisfied by a DMA allocation. - */ - if (mask == 0) { - dev_warn(dev, "coherent DMA mask is unset\n"); - goto no_page; - } - - if ((~mask) & ISA_DMA_THRESHOLD) { - dev_warn(dev, "coherent DMA mask %#llx is smaller " - "than system GFP_DMA mask %#llx\n", - mask, (unsigned long long)ISA_DMA_THRESHOLD); - goto no_page; - } - } - - - size = PAGE_ALIGN(size); - limit = (mask + 1) & ~mask; - if ((limit && size >= limit) || - size >= (CONSISTENT_END - CONSISTENT_BASE)) { - printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n", - size, mask); - return NULL; - } - - order = get_order(size); - - /* Might be useful if we ever have a real legacy DMA zone... 
*/ - if (mask != 0xffffffff) - gfp |= GFP_DMA; - - page = alloc_pages(gfp, order); - if (!page) - goto no_page; - - /* - * Invalidate any data that might be lurking in the - * kernel direct-mapped region for device DMA. - */ - { - unsigned long kaddr = (unsigned long)page_address(page); - memset(page_address(page), 0, size); - flush_dcache_range(kaddr, kaddr + size); - } - - /* - * Allocate a virtual address in the consistent mapping region. - */ - c = ppc_vm_region_alloc(&consistent_head, size, - gfp & ~(__GFP_DMA | __GFP_HIGHMEM)); - if (c) { - unsigned long vaddr = c->vm_start; - struct page *end = page + (1 << order); - - split_page(page, order); - - /* - * Set the "dma handle" - */ - *dma_handle = phys_to_dma(dev, page_to_phys(page)); - - do { - SetPageReserved(page); - map_kernel_page(vaddr, page_to_phys(page), - pgprot_noncached(PAGE_KERNEL)); - page++; - vaddr += PAGE_SIZE; - } while (size -= PAGE_SIZE); - - /* - * Free the otherwise unused pages. - */ - while (page < end) { - __free_page(page); - page++; - } - - return (void *)c->vm_start; - } - - if (page) - __free_pages(page, order); - no_page: - return NULL; -} - -/* - * free a page as defined by the above mapping. - */ -void arch_dma_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, unsigned long attrs) -{ - struct ppc_vm_region *c; - unsigned long flags, addr; - - size = PAGE_ALIGN(size); - - spin_lock_irqsave(&consistent_lock, flags); - - c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr); - if (!c) - goto no_area; - - if ((c->vm_end - c->vm_start) != size) { - printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n", - __func__, c->vm_end - c->vm_start, size); - dump_stack(); - size = c->vm_end - c->vm_start; - } - - addr = c->vm_start; - do { - pte_t *ptep; - unsigned long pfn; - - ptep = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(addr), - addr), - addr), - addr); - if (!pte_none(*ptep) && pte_present(*ptep)) { - pfn = pte_pfn(*ptep); - pte_clear(&init_mm, addr, ptep); - if (pfn_valid(pfn)) { - struct page *page = pfn_to_page(pfn); - __free_reserved_page(page); - } - } - addr += PAGE_SIZE; - } while (size -= PAGE_SIZE); - - flush_tlb_kernel_range(c->vm_start, c->vm_end); - - list_del(&c->vm_list); - - spin_unlock_irqrestore(&consistent_lock, flags); - - kfree(c); - return; - - no_area: - spin_unlock_irqrestore(&consistent_lock, flags); - printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n", - __func__, vaddr); - dump_stack(); -} - /* * make an area consistent. */ @@ -408,23 +116,9 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, __dma_sync_page(paddr, size, dir); } -/* - * Return the PFN for a given cpu virtual address returned by arch_dma_alloc. - */ -long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr, - dma_addr_t dma_addr) +void arch_dma_prep_coherent(struct page *page, size_t size) { - /* This should always be populated, so we don't test every - * level. 
If that fails, we'll have a nice crash which - * will be as good as a BUG_ON() - */ - unsigned long cpu_addr = (unsigned long)vaddr; - pgd_t *pgd = pgd_offset_k(cpu_addr); - pud_t *pud = pud_offset(pgd, cpu_addr); - pmd_t *pmd = pmd_offset(pud, cpu_addr); - pte_t *ptep = pte_offset_kernel(pmd, cpu_addr); + unsigned long kaddr = (unsigned long)page_address(page); - if (pte_none(*ptep) || !pte_present(*ptep)) - return 0; - return pte_pfn(*ptep); + flush_dcache_range(kaddr, kaddr + size); } diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c new file mode 100644 index 00000000000000..fc669643ce6a05 --- /dev/null +++ b/arch/powerpc/mm/ioremap.c @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include +#include +#include +#include + +unsigned long ioremap_bot; +EXPORT_SYMBOL(ioremap_bot); + +void __iomem *ioremap(phys_addr_t addr, unsigned long size) +{ + pgprot_t prot = pgprot_noncached(PAGE_KERNEL); + void *caller = __builtin_return_address(0); + + if (iowa_is_active()) + return iowa_ioremap(addr, size, prot, caller); + return __ioremap_caller(addr, size, prot, caller); +} +EXPORT_SYMBOL(ioremap); + +void __iomem *ioremap_wc(phys_addr_t addr, unsigned long size) +{ + pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL); + void *caller = __builtin_return_address(0); + + if (iowa_is_active()) + return iowa_ioremap(addr, size, prot, caller); + return __ioremap_caller(addr, size, prot, caller); +} +EXPORT_SYMBOL(ioremap_wc); + +void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size) +{ + pgprot_t prot = pgprot_cached(PAGE_KERNEL); + void *caller = __builtin_return_address(0); + + if (iowa_is_active()) + return iowa_ioremap(addr, size, prot, caller); + return __ioremap_caller(addr, size, prot, caller); +} + +void __iomem *ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags) +{ + pte_t pte = __pte(flags); + void *caller = __builtin_return_address(0); + + /* writeable implies dirty for kernel addresses */ + if (pte_write(pte)) + pte = pte_mkdirty(pte); + + /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */ + pte = pte_exprotect(pte); + pte = pte_mkprivileged(pte); + + if (iowa_is_active()) + return iowa_ioremap(addr, size, pte_pgprot(pte), caller); + return __ioremap_caller(addr, size, pte_pgprot(pte), caller); +} +EXPORT_SYMBOL(ioremap_prot); + +int early_ioremap_range(unsigned long ea, phys_addr_t pa, + unsigned long size, pgprot_t prot) +{ + unsigned long i; + + for (i = 0; i < size; i += PAGE_SIZE) { + int err = map_kernel_page(ea + i, pa + i, prot); + + if (WARN_ON_ONCE(err)) /* Should clean up */ + return err; + } + + return 0; +} + +void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size, + pgprot_t prot, void *caller) +{ + struct vm_struct *area; + int ret; + unsigned long va; + + area = __get_vm_area_caller(size, VM_IOREMAP, IOREMAP_START, IOREMAP_END, caller); + if (area == NULL) + return NULL; + + area->phys_addr = pa; + va = (unsigned long)area->addr; + + ret = ioremap_page_range(va, va + size, pa, prot); + if (!ret) + return (void __iomem *)area->addr + offset; + + unmap_kernel_range(va, size); + free_vm_area(area); + + return NULL; +} diff --git a/arch/powerpc/mm/ioremap_32.c b/arch/powerpc/mm/ioremap_32.c new file mode 100644 index 00000000000000..f36121f25243f6 --- /dev/null +++ b/arch/powerpc/mm/ioremap_32.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include +#include +#include + +#include + +void __iomem *ioremap_wt(phys_addr_t addr, unsigned long size) +{ + 
pgprot_t prot = pgprot_cached_wthru(PAGE_KERNEL); + + return __ioremap_caller(addr, size, prot, __builtin_return_address(0)); +} +EXPORT_SYMBOL(ioremap_wt); + +void __iomem * +__ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *caller) +{ + unsigned long v; + phys_addr_t p, offset; + int err; + + /* + * Choose an address to map it to. + * Once the vmalloc system is running, we use it. + * Before then, we use space going down from IOREMAP_TOP + * (ioremap_bot records where we're up to). + */ + p = addr & PAGE_MASK; + offset = addr & ~PAGE_MASK; + size = PAGE_ALIGN(addr + size) - p; + + /* + * If the address lies within the first 16 MB, assume it's in ISA + * memory space + */ + if (p < 16 * 1024 * 1024) + p += _ISA_MEM_BASE; + +#ifndef CONFIG_CRASH_DUMP + /* + * Don't allow anybody to remap normal RAM that we're using. + * mem_init() sets high_memory so only do the check after that. + */ + if (slab_is_available() && p <= virt_to_phys(high_memory - 1) && + page_is_ram(__phys_to_pfn(p))) { + pr_warn("%s(): phys addr 0x%llx is RAM lr %ps\n", __func__, + (unsigned long long)p, __builtin_return_address(0)); + return NULL; + } +#endif + + if (size == 0) + return NULL; + + /* + * Is it already mapped? Perhaps overlapped by a previous + * mapping. + */ + v = p_block_mapped(p); + if (v) + return (void __iomem *)v + offset; + + if (slab_is_available()) + return do_ioremap(p, offset, size, prot, caller); + + /* + * Should check if it is a candidate for a BAT mapping + */ + + err = early_ioremap_range(ioremap_bot - size, p, size, prot); + if (err) + return NULL; + ioremap_bot -= size; + + return (void __iomem *)ioremap_bot + offset; +} + +void iounmap(volatile void __iomem *addr) +{ + /* + * If mapped by BATs then there is nothing to do. + * Calling vfree() generates a benign warning. + */ + if (v_block_mapped((unsigned long)addr)) + return; + + if (addr > high_memory && (unsigned long)addr < ioremap_bot) + vunmap((void *)(PAGE_MASK & (unsigned long)addr)); +} +EXPORT_SYMBOL(iounmap); diff --git a/arch/powerpc/mm/ioremap_64.c b/arch/powerpc/mm/ioremap_64.c new file mode 100644 index 00000000000000..fd29e51700cd0e --- /dev/null +++ b/arch/powerpc/mm/ioremap_64.c @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include +#include +#include + +/** + * Low level function to establish the page tables for an IO mapping + */ +void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot) +{ + int ret; + unsigned long va = (unsigned long)ea; + + /* We don't support the 4K PFN hack with ioremap */ + if (pgprot_val(prot) & H_PAGE_4K_PFN) + return NULL; + + if ((ea + size) >= (void *)IOREMAP_END) { + pr_warn("Outside the supported range\n"); + return NULL; + } + + WARN_ON(pa & ~PAGE_MASK); + WARN_ON(((unsigned long)ea) & ~PAGE_MASK); + WARN_ON(size & ~PAGE_MASK); + + if (slab_is_available()) { + ret = ioremap_page_range(va, va + size, pa, prot); + if (ret) + unmap_kernel_range(va, size); + } else { + ret = early_ioremap_range(va, pa, size, prot); + } + + if (ret) + return NULL; + + return (void __iomem *)ea; +} +EXPORT_SYMBOL(__ioremap_at); + +/** + * Low level function to tear down the page tables for an IO mapping. This is + * used for mappings that are manipulated manually, like partial unmapping of + * PCI IOs or ISA space. 
+ */ +void __iounmap_at(void *ea, unsigned long size) +{ + WARN_ON(((unsigned long)ea) & ~PAGE_MASK); + WARN_ON(size & ~PAGE_MASK); + + unmap_kernel_range((unsigned long)ea, size); +} +EXPORT_SYMBOL(__iounmap_at); + +void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size, + pgprot_t prot, void *caller) +{ + phys_addr_t paligned, offset; + void __iomem *ret; + int err; + + /* We don't support the 4K PFN hack with ioremap */ + if (pgprot_val(prot) & H_PAGE_4K_PFN) + return NULL; + + /* + * Choose an address to map it to. Once the vmalloc system is running, + * we use it. Before that, we map using addresses going up from + * ioremap_bot. vmalloc will use the addresses from IOREMAP_BASE + * through ioremap_bot. + */ + paligned = addr & PAGE_MASK; + offset = addr & ~PAGE_MASK; + size = PAGE_ALIGN(addr + size) - paligned; + + if (size == 0 || paligned == 0) + return NULL; + + if (slab_is_available()) + return do_ioremap(paligned, offset, size, prot, caller); + + err = early_ioremap_range(ioremap_bot, paligned, size, prot); + if (err) + return NULL; + + ret = (void __iomem *)ioremap_bot + offset; + ioremap_bot += size; + + return ret; +} + +/* + * Unmap an IO region and remove it from vmalloc'd list. + * Access to IO memory should be serialized by driver. + */ +void iounmap(volatile void __iomem *token) +{ + void *addr; + + if (!slab_is_available()) + return; + + addr = (void *)((unsigned long __force)PCI_FIX_ADDR(token) & PAGE_MASK); + + if ((unsigned long)addr < ioremap_bot) { + pr_warn("Attempt to iounmap early bolted mapping at 0x%p\n", addr); + return; + } + vunmap(addr); +} +EXPORT_SYMBOL(iounmap); diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c index 74f4555a62ba50..802387b231adcd 100644 --- a/arch/powerpc/mm/kasan/kasan_init_32.c +++ b/arch/powerpc/mm/kasan/kasan_init_32.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -46,7 +47,19 @@ static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned l kasan_populate_pte(new, PAGE_READONLY); else kasan_populate_pte(new, PAGE_KERNEL_RO); - pmd_populate_kernel(&init_mm, pmd, new); + + smp_wmb(); /* See comment in __pte_alloc */ + + spin_lock(&init_mm.page_table_lock); + /* Has another populated it ? */ + if (likely((void *)pmd_page_vaddr(*pmd) == kasan_early_shadow_pte)) { + pmd_populate_kernel(&init_mm, pmd, new); + new = NULL; + } + spin_unlock(&init_mm.page_table_lock); + + if (new && slab_is_available()) + pte_free_kernel(&init_mm, new); } return 0; } @@ -74,7 +87,7 @@ static int __ref kasan_init_region(void *start, size_t size) if (!slab_is_available()) block = memblock_alloc(k_end - k_start, PAGE_SIZE); - for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE) { + for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) { pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur); void *va = block ? 
block + k_cur - k_start : kasan_get_one_page(); pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL); @@ -137,7 +150,11 @@ void __init kasan_init(void) #ifdef CONFIG_MODULES void *module_alloc(unsigned long size) { - void *base = vmalloc_exec(size); + void *base; + + base = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START, VMALLOC_END, + GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS, + NUMA_NO_NODE, __builtin_return_address(0)); if (!base) return NULL; diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 9191a66b3bc556..be941d382c8d10 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -302,12 +302,9 @@ void __init mem_init(void) pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n", PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP)); #endif /* CONFIG_HIGHMEM */ -#ifdef CONFIG_NOT_COHERENT_CACHE - pr_info(" * 0x%08lx..0x%08lx : consistent mem\n", - IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE); -#endif /* CONFIG_NOT_COHERENT_CACHE */ - pr_info(" * 0x%08lx..0x%08lx : early ioremap\n", - ioremap_bot, IOREMAP_TOP); + if (ioremap_bot != IOREMAP_TOP) + pr_info(" * 0x%08lx..0x%08lx : early ioremap\n", + ioremap_bot, IOREMAP_TOP); pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n", VMALLOC_START, VMALLOC_END); #endif /* CONFIG_PPC32 */ @@ -406,63 +403,6 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, } EXPORT_SYMBOL(flush_icache_user_range); -/* - * This is called at the end of handling a user page fault, when the - * fault has been handled by updating a PTE in the linux page tables. - * We use it to preload an HPTE into the hash table corresponding to - * the updated linux PTE. - * - * This must always be called with the pte lock held. - */ -void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, - pte_t *ptep) -{ -#ifdef CONFIG_PPC_BOOK3S - /* - * We don't need to worry about _PAGE_PRESENT here because we are - * called with either mm->page_table_lock held or ptl lock held - */ - unsigned long trap; - bool is_exec; - - if (radix_enabled()) { - prefetch((void *)address); - return; - } - - /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */ - if (!pte_young(*ptep) || address >= TASK_SIZE) - return; - - /* We try to figure out if we are coming from an instruction - * access fault and pass that down to __hash_page so we avoid - * double-faulting on execution of fresh text. We have to test - * for regs NULL since init will get here first thing at boot - * - * We also avoid filling the hash if not coming from a fault - */ - - trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL; - switch (trap) { - case 0x300: - is_exec = false; - break; - case 0x400: - is_exec = true; - break; - default: - return; - } - - hash_preload(vma->vm_mm, address, is_exec, trap); -#endif /* CONFIG_PPC_BOOK3S */ -#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \ - && defined(CONFIG_HUGETLB_PAGE) - if (is_vm_hugetlb_page(vma)) - book3e_hugetlb_preload(vma, address, *ptep); -#endif -} - /* * System memory should not be in /proc/iomem but various tools expect it * (eg kdump). 
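
Note on the ioremap consolidation above (the new arch/powerpc/mm/ioremap.c, ioremap_32.c and ioremap_64.c, plus the pgtable_32.c/pgtable_64.c removals further down): the driver-facing API is intended to stay the same. As a rough sketch only — the base physical address, mapping size and register offset below are invented for illustration, example_map_regs() is a hypothetical helper, and readl()/writel() are the usual MMIO accessors from <linux/io.h> rather than anything added by this series — a caller still maps and accesses MMIO like this:

	#include <linux/types.h>
	#include <linux/io.h>

	static int example_map_regs(phys_addr_t base_phys)	/* hypothetical helper */
	{
		void __iomem *regs;
		u32 val;

		regs = ioremap(base_phys, 0x1000);	/* non-cached mapping via __ioremap_caller() */
		if (!regs)
			return -ENOMEM;

		val = readl(regs + 0x10);		/* invented register offset */
		writel(val | 0x1, regs + 0x10);

		iounmap(regs);				/* releases the vmalloc area set up by do_ioremap() */
		return 0;
	}
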
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index 32c1a191c28aec..c750ac9ec7130e 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -82,10 +82,6 @@ static inline void print_system_hash_info(void) {} #else /* CONFIG_PPC_MMU_NOHASH */ -extern void hash_preload(struct mm_struct *mm, unsigned long ea, - bool is_exec, unsigned long trap); - - extern void _tlbie(unsigned long address); extern void _tlbia(void); @@ -95,6 +91,8 @@ void print_system_hash_info(void); #ifdef CONFIG_PPC32 +void hash_preload(struct mm_struct *mm, unsigned long ea); + extern void mapin_ram(void); extern void setbat(int index, unsigned long virt, phys_addr_t phys, unsigned int size, pgprot_t prot); @@ -108,7 +106,6 @@ extern u8 early_hash[]; #endif /* CONFIG_PPC32 */ -extern unsigned long ioremap_bot; extern unsigned long __max_low_memory; extern phys_addr_t __initial_memory_limit_addr; extern phys_addr_t total_memory; diff --git a/arch/powerpc/mm/nohash/book3e_hugetlbpage.c b/arch/powerpc/mm/nohash/book3e_hugetlbpage.c index 61915f4d3c7f0e..8b88be91b622ab 100644 --- a/arch/powerpc/mm/nohash/book3e_hugetlbpage.c +++ b/arch/powerpc/mm/nohash/book3e_hugetlbpage.c @@ -122,8 +122,8 @@ static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid) return found; } -void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, - pte_t pte) +static void +book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte) { unsigned long mas1, mas2; u64 mas7_3; @@ -183,6 +183,18 @@ void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, local_irq_restore(flags); } +/* + * This is called at the end of handling a user page fault, when the + * fault has been handled by updating a PTE in the linux page tables. + * + * This must always be called with the pte lock held. 
+ */ +void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) +{ + if (is_vm_hugetlb_page(vma)) + book3e_hugetlb_preload(vma, address, *ptep); +} + void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { struct hstate *hstate = hstate_file(vma->vm_file); diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c index bf60983a58c781..696f568253a0ca 100644 --- a/arch/powerpc/mm/nohash/tlb.c +++ b/arch/powerpc/mm/nohash/tlb.c @@ -703,6 +703,8 @@ static void __init early_init_mmu_global(void) * for use by the TLB miss code */ linear_map_top = memblock_end_of_DRAM(); + + ioremap_bot = IOREMAP_BASE; } static void __init early_mmu_set_memory_limit(void) diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 35cb96cfc258d0..8ec5dfb65b2eb1 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -27,166 +27,13 @@ #include #include #include -#include #include #include #include -unsigned long ioremap_bot; -EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */ - extern char etext[], _stext[], _sinittext[], _einittext[]; -void __iomem * -ioremap(phys_addr_t addr, unsigned long size) -{ - pgprot_t prot = pgprot_noncached(PAGE_KERNEL); - - return __ioremap_caller(addr, size, prot, __builtin_return_address(0)); -} -EXPORT_SYMBOL(ioremap); - -void __iomem * -ioremap_wc(phys_addr_t addr, unsigned long size) -{ - pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL); - - return __ioremap_caller(addr, size, prot, __builtin_return_address(0)); -} -EXPORT_SYMBOL(ioremap_wc); - -void __iomem * -ioremap_wt(phys_addr_t addr, unsigned long size) -{ - pgprot_t prot = pgprot_cached_wthru(PAGE_KERNEL); - - return __ioremap_caller(addr, size, prot, __builtin_return_address(0)); -} -EXPORT_SYMBOL(ioremap_wt); - -void __iomem * -ioremap_coherent(phys_addr_t addr, unsigned long size) -{ - pgprot_t prot = pgprot_cached(PAGE_KERNEL); - - return __ioremap_caller(addr, size, prot, __builtin_return_address(0)); -} -EXPORT_SYMBOL(ioremap_coherent); - -void __iomem * -ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags) -{ - pte_t pte = __pte(flags); - - /* writeable implies dirty for kernel addresses */ - if (pte_write(pte)) - pte = pte_mkdirty(pte); - - /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */ - pte = pte_exprotect(pte); - pte = pte_mkprivileged(pte); - - return __ioremap_caller(addr, size, pte_pgprot(pte), __builtin_return_address(0)); -} -EXPORT_SYMBOL(ioremap_prot); - -void __iomem * -__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags) -{ - return __ioremap_caller(addr, size, __pgprot(flags), __builtin_return_address(0)); -} - -void __iomem * -__ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *caller) -{ - unsigned long v, i; - phys_addr_t p; - int err; - - /* - * Choose an address to map it to. - * Once the vmalloc system is running, we use it. - * Before then, we use space going down from IOREMAP_TOP - * (ioremap_bot records where we're up to). - */ - p = addr & PAGE_MASK; - size = PAGE_ALIGN(addr + size) - p; - - /* - * If the address lies within the first 16 MB, assume it's in ISA - * memory space - */ - if (p < 16*1024*1024) - p += _ISA_MEM_BASE; - -#ifndef CONFIG_CRASH_DUMP - /* - * Don't allow anybody to remap normal RAM that we're using. - * mem_init() sets high_memory so only do the check after that. 
- */ - if (slab_is_available() && p <= virt_to_phys(high_memory - 1) && - page_is_ram(__phys_to_pfn(p))) { - printk("__ioremap(): phys addr 0x%llx is RAM lr %ps\n", - (unsigned long long)p, __builtin_return_address(0)); - return NULL; - } -#endif - - if (size == 0) - return NULL; - - /* - * Is it already mapped? Perhaps overlapped by a previous - * mapping. - */ - v = p_block_mapped(p); - if (v) - goto out; - - if (slab_is_available()) { - struct vm_struct *area; - area = get_vm_area_caller(size, VM_IOREMAP, caller); - if (area == 0) - return NULL; - area->phys_addr = p; - v = (unsigned long) area->addr; - } else { - v = (ioremap_bot -= size); - } - - /* - * Should check if it is a candidate for a BAT mapping - */ - - err = 0; - for (i = 0; i < size && err == 0; i += PAGE_SIZE) - err = map_kernel_page(v + i, p + i, prot); - if (err) { - if (slab_is_available()) - vunmap((void *)v); - return NULL; - } - -out: - return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK)); -} -EXPORT_SYMBOL(__ioremap); - -void iounmap(volatile void __iomem *addr) -{ - /* - * If mapped by BATs then there is nothing to do. - * Calling vfree() generates a benign warning. - */ - if (v_block_mapped((unsigned long)addr)) - return; - - if (addr > high_memory && (unsigned long) addr < ioremap_bot) - vunmap((void *) (PAGE_MASK & (unsigned long)addr)); -} -EXPORT_SYMBOL(iounmap); - static void __init *early_alloc_pgtable(unsigned long size) { void *ptr = memblock_alloc(size, size); @@ -252,7 +99,7 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top) map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL); #ifdef CONFIG_PPC_BOOK3S_32 if (ktext) - hash_preload(&init_mm, v, false, 0x300); + hash_preload(&init_mm, v); #endif v += PAGE_SIZE; p += PAGE_SIZE; diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index 9ad59b73398475..e78832dce7bb43 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * This file contains ioremap and related functions for 64-bit machines. + * This file contains pgtable related functions for 64-bit machines. 
* * Derived from arch/ppc64/mm/init.c * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) @@ -34,7 +34,6 @@ #include #include #include -#include #include #include #include @@ -98,208 +97,8 @@ unsigned long __pte_frag_nr; EXPORT_SYMBOL(__pte_frag_nr); unsigned long __pte_frag_size_shift; EXPORT_SYMBOL(__pte_frag_size_shift); -unsigned long ioremap_bot; -#else /* !CONFIG_PPC_BOOK3S_64 */ -unsigned long ioremap_bot = IOREMAP_BASE; #endif -int __weak ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot, int nid) -{ - unsigned long i; - - for (i = 0; i < size; i += PAGE_SIZE) { - int err = map_kernel_page(ea + i, pa + i, prot); - if (err) { - if (slab_is_available()) - unmap_kernel_range(ea, size); - else - WARN_ON_ONCE(1); /* Should clean up */ - return err; - } - } - - return 0; -} - -/** - * __ioremap_at - Low level function to establish the page tables - * for an IO mapping - */ -void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot) -{ - /* We don't support the 4K PFN hack with ioremap */ - if (pgprot_val(prot) & H_PAGE_4K_PFN) - return NULL; - - if ((ea + size) >= (void *)IOREMAP_END) { - pr_warn("Outside the supported range\n"); - return NULL; - } - - WARN_ON(pa & ~PAGE_MASK); - WARN_ON(((unsigned long)ea) & ~PAGE_MASK); - WARN_ON(size & ~PAGE_MASK); - - if (ioremap_range((unsigned long)ea, pa, size, prot, NUMA_NO_NODE)) - return NULL; - - return (void __iomem *)ea; -} - -/** - * __iounmap_from - Low level function to tear down the page tables - * for an IO mapping. This is used for mappings that - * are manipulated manually, like partial unmapping of - * PCI IOs or ISA space. - */ -void __iounmap_at(void *ea, unsigned long size) -{ - WARN_ON(((unsigned long)ea) & ~PAGE_MASK); - WARN_ON(size & ~PAGE_MASK); - - unmap_kernel_range((unsigned long)ea, size); -} - -void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size, - pgprot_t prot, void *caller) -{ - phys_addr_t paligned; - void __iomem *ret; - - /* - * Choose an address to map it to. - * Once the imalloc system is running, we use it. - * Before that, we map using addresses going - * up from ioremap_bot. 
imalloc will use - * the addresses from ioremap_bot through - * IMALLOC_END - * - */ - paligned = addr & PAGE_MASK; - size = PAGE_ALIGN(addr + size) - paligned; - - if ((size == 0) || (paligned == 0)) - return NULL; - - if (slab_is_available()) { - struct vm_struct *area; - - area = __get_vm_area_caller(size, VM_IOREMAP, - ioremap_bot, IOREMAP_END, - caller); - if (area == NULL) - return NULL; - - area->phys_addr = paligned; - ret = __ioremap_at(paligned, area->addr, size, prot); - } else { - ret = __ioremap_at(paligned, (void *)ioremap_bot, size, prot); - if (ret) - ioremap_bot += size; - } - - if (ret) - ret += addr & ~PAGE_MASK; - return ret; -} - -void __iomem * __ioremap(phys_addr_t addr, unsigned long size, - unsigned long flags) -{ - return __ioremap_caller(addr, size, __pgprot(flags), __builtin_return_address(0)); -} - -void __iomem * ioremap(phys_addr_t addr, unsigned long size) -{ - pgprot_t prot = pgprot_noncached(PAGE_KERNEL); - void *caller = __builtin_return_address(0); - - if (ppc_md.ioremap) - return ppc_md.ioremap(addr, size, prot, caller); - return __ioremap_caller(addr, size, prot, caller); -} - -void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size) -{ - pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL); - void *caller = __builtin_return_address(0); - - if (ppc_md.ioremap) - return ppc_md.ioremap(addr, size, prot, caller); - return __ioremap_caller(addr, size, prot, caller); -} - -void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size) -{ - pgprot_t prot = pgprot_cached(PAGE_KERNEL); - void *caller = __builtin_return_address(0); - - if (ppc_md.ioremap) - return ppc_md.ioremap(addr, size, prot, caller); - return __ioremap_caller(addr, size, prot, caller); -} - -void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size, - unsigned long flags) -{ - pte_t pte = __pte(flags); - void *caller = __builtin_return_address(0); - - /* writeable implies dirty for kernel addresses */ - if (pte_write(pte)) - pte = pte_mkdirty(pte); - - /* we don't want to let _PAGE_EXEC leak out */ - pte = pte_exprotect(pte); - /* - * Force kernel mapping. - */ - pte = pte_mkprivileged(pte); - - if (ppc_md.ioremap) - return ppc_md.ioremap(addr, size, pte_pgprot(pte), caller); - return __ioremap_caller(addr, size, pte_pgprot(pte), caller); -} - - -/* - * Unmap an IO region and remove it from imalloc'd list. - * Access to IO memory should be serialized by driver. 
- */ -void __iounmap(volatile void __iomem *token) -{ - void *addr; - - if (!slab_is_available()) - return; - - addr = (void *) ((unsigned long __force) - PCI_FIX_ADDR(token) & PAGE_MASK); - if ((unsigned long)addr < ioremap_bot) { - printk(KERN_WARNING "Attempt to iounmap early bolted mapping" - " at 0x%p\n", addr); - return; - } - vunmap(addr); -} - -void iounmap(volatile void __iomem *token) -{ - if (ppc_md.iounmap) - ppc_md.iounmap(token); - else - __iounmap(token); -} - -EXPORT_SYMBOL(ioremap); -EXPORT_SYMBOL(ioremap_wc); -EXPORT_SYMBOL(ioremap_prot); -EXPORT_SYMBOL(__ioremap); -EXPORT_SYMBOL(__ioremap_at); -EXPORT_SYMBOL(iounmap); -EXPORT_SYMBOL(__iounmap); -EXPORT_SYMBOL(__iounmap_at); - #ifndef __PAGETABLE_PUD_FOLDED /* 4 level page table */ struct page *pgd_page(pgd_t pgd) diff --git a/arch/powerpc/mm/ptdump/bats.c b/arch/powerpc/mm/ptdump/bats.c index a0d23e96e841a8..4154feac1da340 100644 --- a/arch/powerpc/mm/ptdump/bats.c +++ b/arch/powerpc/mm/ptdump/bats.c @@ -149,7 +149,7 @@ static int bats_show_603(struct seq_file *m, void *v) static int bats_open(struct inode *inode, struct file *file) { - if (cpu_has_feature(CPU_FTR_601)) + if (IS_ENABLED(CONFIG_PPC_BOOK3S_601)) return single_open(file, bats_show_601, NULL); return single_open(file, bats_show_603, NULL); diff --git a/arch/powerpc/mm/ptdump/hashpagetable.c b/arch/powerpc/mm/ptdump/hashpagetable.c index 72f0e4a3d839b5..a07278027c6f4a 100644 --- a/arch/powerpc/mm/ptdump/hashpagetable.c +++ b/arch/powerpc/mm/ptdump/hashpagetable.c @@ -237,7 +237,6 @@ static int native_find(unsigned long ea, int psize, bool primary, u64 *v, u64 return -1; } -#ifdef CONFIG_PPC_PSERIES static int pseries_find(unsigned long ea, int psize, bool primary, u64 *v, u64 *r) { struct hash_pte ptes[4]; @@ -274,7 +273,6 @@ static int pseries_find(unsigned long ea, int psize, bool primary, u64 *v, u64 * } return -1; } -#endif static void decode_r(int bps, unsigned long r, unsigned long *rpn, int *aps, unsigned long *lp_bits) @@ -316,10 +314,9 @@ static void decode_r(int bps, unsigned long r, unsigned long *rpn, int *aps, static int base_hpte_find(unsigned long ea, int psize, bool primary, u64 *v, u64 *r) { -#ifdef CONFIG_PPC_PSERIES - if (firmware_has_feature(FW_FEATURE_LPAR)) + if (IS_ENABLED(CONFIG_PPC_PSERIES) && firmware_has_feature(FW_FEATURE_LPAR)) return pseries_find(ea, psize, primary, v, r); -#endif + return native_find(ea, psize, primary, v, r); } @@ -386,12 +383,13 @@ static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start) psize = mmu_vmalloc_psize; else psize = mmu_io_psize; -#ifdef CONFIG_PPC_64K_PAGES + /* check for secret 4K mappings */ - if (((pteval & H_PAGE_COMBO) == H_PAGE_COMBO) || - ((pteval & H_PAGE_4K_PFN) == H_PAGE_4K_PFN)) + if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && + ((pteval & H_PAGE_COMBO) == H_PAGE_COMBO || + (pteval & H_PAGE_4K_PFN) == H_PAGE_4K_PFN)) psize = mmu_io_psize; -#endif + /* check for hashpte */ status = hpte_find(st, addr, psize); @@ -469,9 +467,10 @@ static void walk_linearmapping(struct pg_state *st) static void walk_vmemmap(struct pg_state *st) { -#ifdef CONFIG_SPARSEMEM_VMEMMAP struct vmemmap_backing *ptr = vmemmap_list; + if (!IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP)) + return; /* * Traverse the vmemmaped memory and dump pages that are in the hash * pagetable. 
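The ptdump hunks above repeatedly replace #ifdef guards with IS_ENABLED() checks. A small sketch of that idiom, with example_find() as a hypothetical caller (not from the patch); the gain is that both branches are now seen and type-checked by the compiler, and the dead one is folded away at compile time:

#include <linux/kconfig.h>
#include <asm/firmware.h>

static int example_find(void)
{
	/*
	 * Old style: the LPAR path is invisible to the compiler whenever
	 * CONFIG_PPC_PSERIES is not set, so it can silently bit-rot.
	 *
	 * #ifdef CONFIG_PPC_PSERIES
	 *	if (firmware_has_feature(FW_FEATURE_LPAR))
	 *		return 1;
	 * #endif
	 */

	/* New style: always compiled, discarded when the option is off. */
	if (IS_ENABLED(CONFIG_PPC_PSERIES) && firmware_has_feature(FW_FEATURE_LPAR))
		return 1;

	return 0;
}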
@@ -481,7 +480,6 @@ static void walk_vmemmap(struct pg_state *st) ptr = ptr->list; } seq_puts(st->seq, "---[ vmemmap end ]---\n"); -#endif } static void populate_markers(void) @@ -495,11 +493,7 @@ static void populate_markers(void) address_markers[6].start_address = PHB_IO_END; address_markers[7].start_address = IOREMAP_BASE; address_markers[8].start_address = IOREMAP_END; -#ifdef CONFIG_PPC_BOOK3S_64 address_markers[9].start_address = H_VMEMMAP_START; -#else - address_markers[9].start_address = VMEMMAP_BASE; -#endif } static int ptdump_show(struct seq_file *m, void *v) diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c index 6a88a9f585d4f8..2f9ddc29c53554 100644 --- a/arch/powerpc/mm/ptdump/ptdump.c +++ b/arch/powerpc/mm/ptdump/ptdump.c @@ -26,10 +26,6 @@ #include "ptdump.h" -#ifdef CONFIG_PPC32 -#define KERN_VIRT_START PAGE_OFFSET -#endif - /* * To visualise what is happening, * @@ -88,10 +84,6 @@ static struct addr_marker address_markers[] = { #else { 0, "Early I/O remap start" }, { 0, "Early I/O remap end" }, -#ifdef CONFIG_NOT_COHERENT_CACHE - { 0, "Consistent mem start" }, - { 0, "Consistent mem end" }, -#endif #ifdef CONFIG_HIGHMEM { 0, "Highmem PTEs start" }, { 0, "Highmem PTEs end" }, @@ -181,7 +173,7 @@ static void dump_addr(struct pg_state *st, unsigned long addr) static void note_prot_wx(struct pg_state *st, unsigned long addr) { - if (!st->check_wx) + if (!IS_ENABLED(CONFIG_PPC_DEBUG_WX) || !st->check_wx) return; if (!((st->current_flags & pgprot_val(PAGE_KERNEL_X)) == pgprot_val(PAGE_KERNEL_X))) @@ -299,17 +291,15 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start) static void walk_pagetables(struct pg_state *st) { - pgd_t *pgd = pgd_offset_k(0UL); unsigned int i; - unsigned long addr; - - addr = st->start_address; + unsigned long addr = st->start_address & PGDIR_MASK; + pgd_t *pgd = pgd_offset_k(addr); /* * Traverse the linux pagetable structure and dump pages that are in * the hash pagetable. 
*/ - for (i = 0; i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) { + for (i = pgd_index(addr); i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) { if (!pgd_none(*pgd) && !pgd_is_leaf(*pgd)) /* pgd exists */ walk_pud(st, pgd, addr); @@ -341,11 +331,6 @@ static void populate_markers(void) #else /* !CONFIG_PPC64 */ address_markers[i++].start_address = ioremap_bot; address_markers[i++].start_address = IOREMAP_TOP; -#ifdef CONFIG_NOT_COHERENT_CACHE - address_markers[i++].start_address = IOREMAP_TOP; - address_markers[i++].start_address = IOREMAP_TOP + - CONFIG_CONSISTENT_SIZE; -#endif #ifdef CONFIG_HIGHMEM address_markers[i++].start_address = PKMAP_BASE; address_markers[i++].start_address = PKMAP_ADDR(LAST_PKMAP); @@ -364,12 +349,13 @@ static int ptdump_show(struct seq_file *m, void *v) struct pg_state st = { .seq = m, .marker = address_markers, + .start_address = PAGE_OFFSET, }; - if (radix_enabled()) - st.start_address = PAGE_OFFSET; - else +#ifdef CONFIG_PPC64 + if (!radix_enabled()) st.start_address = KERN_VIRT_START; +#endif /* Traverse kernel page tables */ walk_pagetables(&st); @@ -407,12 +393,13 @@ void ptdump_check_wx(void) .seq = NULL, .marker = address_markers, .check_wx = true, + .start_address = PAGE_OFFSET, }; - if (radix_enabled()) - st.start_address = PAGE_OFFSET; - else +#ifdef CONFIG_PPC64 + if (!radix_enabled()) st.start_address = KERN_VIRT_START; +#endif walk_pagetables(&st); diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index dea243185ea4bc..cb50a9e1fd2d7a 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -577,6 +577,7 @@ static int core_imc_mem_init(int cpu, int size) { int nid, rc = 0, core_id = (cpu / threads_per_core); struct imc_mem_info *mem_info; + struct page *page; /* * alloc_pages_node() will allocate memory for core in the @@ -587,11 +588,12 @@ static int core_imc_mem_init(int cpu, int size) mem_info->id = core_id; /* We need only vbase for core counters */ - mem_info->vbase = page_address(alloc_pages_node(nid, - GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE | - __GFP_NOWARN, get_order(size))); - if (!mem_info->vbase) + page = alloc_pages_node(nid, + GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE | + __GFP_NOWARN, get_order(size)); + if (!page) return -ENOMEM; + mem_info->vbase = page_address(page); /* Init the mutex */ core_imc_refc[core_id].id = core_id; @@ -849,15 +851,17 @@ static int thread_imc_mem_alloc(int cpu_id, int size) int nid = cpu_to_node(cpu_id); if (!local_mem) { + struct page *page; /* * This case could happen only once at start, since we dont * free the memory in cpu offline path. 
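The imc-pmu.c hunks here all correct the same pattern: page_address() was applied to the result of alloc_pages_node() before that result was checked for NULL. A minimal sketch of the corrected shape; imc_alloc_counter_mem() is an invented name for illustration only:

#include <linux/gfp.h>
#include <linux/mm.h>

static void *imc_alloc_counter_mem(int nid, int size)
{
	struct page *page;

	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
				__GFP_NOWARN, get_order(size));
	if (!page)		/* check the page itself, not its mapped address */
		return NULL;

	return page_address(page);
}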
*/ - local_mem = page_address(alloc_pages_node(nid, + page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE | - __GFP_NOWARN, get_order(size))); - if (!local_mem) + __GFP_NOWARN, get_order(size)); + if (!page) return -ENOMEM; + local_mem = page_address(page); per_cpu(thread_imc_mem, cpu_id) = local_mem; } @@ -1095,11 +1099,14 @@ static int trace_imc_mem_alloc(int cpu_id, int size) int core_id = (cpu_id / threads_per_core); if (!local_mem) { - local_mem = page_address(alloc_pages_node(phys_id, - GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE | - __GFP_NOWARN, get_order(size))); - if (!local_mem) + struct page *page; + + page = alloc_pages_node(phys_id, + GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE | + __GFP_NOWARN, get_order(size)); + if (!page) return -ENOMEM; + local_mem = page_address(page); per_cpu(trace_imc_mem, cpu_id) = local_mem; /* Initialise the counters for trace mode */ diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig index b369ed4e367547..25ebe634a66109 100644 --- a/arch/powerpc/platforms/44x/Kconfig +++ b/arch/powerpc/platforms/44x/Kconfig @@ -272,14 +272,6 @@ config PPC4xx_GPIO help Enable gpiolib support for ppc440 based boards -config PPC4xx_OCM - bool "PPC4xx On Chip Memory (OCM) support" - depends on 4xx - select PPC_LIB_RHEAP - help - Enable OCM support for PowerPC 4xx platforms with on chip memory, - OCM provides the fast place for memory access to improve performance. - # 44x specific CPU modules, selected based on the board above. config 440EP bool diff --git a/arch/powerpc/platforms/4xx/Makefile b/arch/powerpc/platforms/4xx/Makefile index f5ae27ca131be6..d009d2e0b9e8fa 100644 --- a/arch/powerpc/platforms/4xx/Makefile +++ b/arch/powerpc/platforms/4xx/Makefile @@ -1,6 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only obj-y += uic.o machine_check.o -obj-$(CONFIG_PPC4xx_OCM) += ocm.o obj-$(CONFIG_4xx_SOC) += soc.o obj-$(CONFIG_PCI) += pci.o obj-$(CONFIG_PPC4xx_HSTA_MSI) += hsta_msi.o diff --git a/arch/powerpc/platforms/4xx/ocm.c b/arch/powerpc/platforms/4xx/ocm.c deleted file mode 100644 index ba3257406cedc0..00000000000000 --- a/arch/powerpc/platforms/4xx/ocm.c +++ /dev/null @@ -1,390 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * PowerPC 4xx OCM memory allocation support - * - * (C) Copyright 2009, Applied Micro Circuits Corporation - * Victor Gallardo (vgallardo@amcc.com) - * - * See file CREDITS for list of people who contributed to this - * project. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#define OCM_DISABLED 0 -#define OCM_ENABLED 1 - -struct ocm_block { - struct list_head list; - void __iomem *addr; - int size; - const char *owner; -}; - -/* non-cached or cached region */ -struct ocm_region { - phys_addr_t phys; - void __iomem *virt; - - int memtotal; - int memfree; - - rh_info_t *rh; - struct list_head list; -}; - -struct ocm_info { - int index; - int status; - int ready; - - phys_addr_t phys; - - int alignment; - int memtotal; - int cache_size; - - struct ocm_region nc; /* non-cached region */ - struct ocm_region c; /* cached region */ -}; - -static struct ocm_info *ocm_nodes; -static int ocm_count; - -static struct ocm_info *ocm_get_node(unsigned int index) -{ - if (index >= ocm_count) { - printk(KERN_ERR "PPC4XX OCM: invalid index"); - return NULL; - } - - return &ocm_nodes[index]; -} - -static int ocm_free_region(struct ocm_region *ocm_reg, const void *addr) -{ - struct ocm_block *blk, *tmp; - unsigned long offset; - - if (!ocm_reg->virt) - return 0; - - list_for_each_entry_safe(blk, tmp, &ocm_reg->list, list) { - if (blk->addr == addr) { - offset = addr - ocm_reg->virt; - ocm_reg->memfree += blk->size; - rh_free(ocm_reg->rh, offset); - list_del(&blk->list); - kfree(blk); - return 1; - } - } - - return 0; -} - -static void __init ocm_init_node(int count, struct device_node *node) -{ - struct ocm_info *ocm; - - const unsigned int *cell_index; - const unsigned int *cache_size; - int len; - - struct resource rsrc; - - ocm = ocm_get_node(count); - - cell_index = of_get_property(node, "cell-index", &len); - if (!cell_index) { - printk(KERN_ERR "PPC4XX OCM: missing cell-index property"); - return; - } - ocm->index = *cell_index; - - if (of_device_is_available(node)) - ocm->status = OCM_ENABLED; - - cache_size = of_get_property(node, "cached-region-size", &len); - if (cache_size) - ocm->cache_size = *cache_size; - - if (of_address_to_resource(node, 0, &rsrc)) { - printk(KERN_ERR "PPC4XX OCM%d: could not get resource address\n", - ocm->index); - return; - } - - ocm->phys = rsrc.start; - ocm->memtotal = (rsrc.end - rsrc.start + 1); - - printk(KERN_INFO "PPC4XX OCM%d: %d Bytes (%s)\n", - ocm->index, ocm->memtotal, - (ocm->status == OCM_DISABLED) ? 
"disabled" : "enabled"); - - if (ocm->status == OCM_DISABLED) - return; - - /* request region */ - - if (!request_mem_region(ocm->phys, ocm->memtotal, "ppc4xx_ocm")) { - printk(KERN_ERR "PPC4XX OCM%d: could not request region\n", - ocm->index); - return; - } - - /* Configure non-cached and cached regions */ - - ocm->nc.phys = ocm->phys; - ocm->nc.memtotal = ocm->memtotal - ocm->cache_size; - ocm->nc.memfree = ocm->nc.memtotal; - - ocm->c.phys = ocm->phys + ocm->nc.memtotal; - ocm->c.memtotal = ocm->cache_size; - ocm->c.memfree = ocm->c.memtotal; - - if (ocm->nc.memtotal == 0) - ocm->nc.phys = 0; - - if (ocm->c.memtotal == 0) - ocm->c.phys = 0; - - printk(KERN_INFO "PPC4XX OCM%d: %d Bytes (non-cached)\n", - ocm->index, ocm->nc.memtotal); - - printk(KERN_INFO "PPC4XX OCM%d: %d Bytes (cached)\n", - ocm->index, ocm->c.memtotal); - - /* ioremap the non-cached region */ - if (ocm->nc.memtotal) { - ocm->nc.virt = __ioremap(ocm->nc.phys, ocm->nc.memtotal, - _PAGE_EXEC | pgprot_val(PAGE_KERNEL_NCG)); - - if (!ocm->nc.virt) { - printk(KERN_ERR - "PPC4XX OCM%d: failed to ioremap non-cached memory\n", - ocm->index); - ocm->nc.memfree = 0; - return; - } - } - - /* ioremap the cached region */ - - if (ocm->c.memtotal) { - ocm->c.virt = __ioremap(ocm->c.phys, ocm->c.memtotal, - _PAGE_EXEC | pgprot_val(PAGE_KERNEL)); - - if (!ocm->c.virt) { - printk(KERN_ERR - "PPC4XX OCM%d: failed to ioremap cached memory\n", - ocm->index); - ocm->c.memfree = 0; - return; - } - } - - /* Create Remote Heaps */ - - ocm->alignment = 4; /* default 4 byte alignment */ - - if (ocm->nc.virt) { - ocm->nc.rh = rh_create(ocm->alignment); - rh_attach_region(ocm->nc.rh, 0, ocm->nc.memtotal); - } - - if (ocm->c.virt) { - ocm->c.rh = rh_create(ocm->alignment); - rh_attach_region(ocm->c.rh, 0, ocm->c.memtotal); - } - - INIT_LIST_HEAD(&ocm->nc.list); - INIT_LIST_HEAD(&ocm->c.list); - - ocm->ready = 1; -} - -static int ocm_debugfs_show(struct seq_file *m, void *v) -{ - struct ocm_block *blk, *tmp; - unsigned int i; - - for (i = 0; i < ocm_count; i++) { - struct ocm_info *ocm = ocm_get_node(i); - - if (!ocm || !ocm->ready) - continue; - - seq_printf(m, "PPC4XX OCM : %d\n", ocm->index); - seq_printf(m, "PhysAddr : %pa\n", &(ocm->phys)); - seq_printf(m, "MemTotal : %d Bytes\n", ocm->memtotal); - seq_printf(m, "MemTotal(NC) : %d Bytes\n", ocm->nc.memtotal); - seq_printf(m, "MemTotal(C) : %d Bytes\n\n", ocm->c.memtotal); - - seq_printf(m, "NC.PhysAddr : %pa\n", &(ocm->nc.phys)); - seq_printf(m, "NC.VirtAddr : 0x%p\n", ocm->nc.virt); - seq_printf(m, "NC.MemTotal : %d Bytes\n", ocm->nc.memtotal); - seq_printf(m, "NC.MemFree : %d Bytes\n", ocm->nc.memfree); - - list_for_each_entry_safe(blk, tmp, &ocm->nc.list, list) { - seq_printf(m, "NC.MemUsed : %d Bytes (%s)\n", - blk->size, blk->owner); - } - - seq_printf(m, "\nC.PhysAddr : %pa\n", &(ocm->c.phys)); - seq_printf(m, "C.VirtAddr : 0x%p\n", ocm->c.virt); - seq_printf(m, "C.MemTotal : %d Bytes\n", ocm->c.memtotal); - seq_printf(m, "C.MemFree : %d Bytes\n", ocm->c.memfree); - - list_for_each_entry_safe(blk, tmp, &ocm->c.list, list) { - seq_printf(m, "C.MemUsed : %d Bytes (%s)\n", - blk->size, blk->owner); - } - - seq_putc(m, '\n'); - } - - return 0; -} - -static int ocm_debugfs_open(struct inode *inode, struct file *file) -{ - return single_open(file, ocm_debugfs_show, NULL); -} - -static const struct file_operations ocm_debugfs_fops = { - .open = ocm_debugfs_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static int ocm_debugfs_init(void) -{ - struct dentry 
*junk; - - junk = debugfs_create_dir("ppc4xx_ocm", 0); - if (!junk) { - printk(KERN_ALERT "debugfs ppc4xx ocm: failed to create dir\n"); - return -1; - } - - if (debugfs_create_file("info", 0644, junk, NULL, &ocm_debugfs_fops)) { - printk(KERN_ALERT "debugfs ppc4xx ocm: failed to create file\n"); - return -1; - } - - return 0; -} - -void *ppc4xx_ocm_alloc(phys_addr_t *phys, int size, int align, - int flags, const char *owner) -{ - void __iomem *addr = NULL; - unsigned long offset; - struct ocm_info *ocm; - struct ocm_region *ocm_reg; - struct ocm_block *ocm_blk; - int i; - - for (i = 0; i < ocm_count; i++) { - ocm = ocm_get_node(i); - - if (!ocm || !ocm->ready) - continue; - - if (flags == PPC4XX_OCM_NON_CACHED) - ocm_reg = &ocm->nc; - else - ocm_reg = &ocm->c; - - if (!ocm_reg->virt) - continue; - - if (align < ocm->alignment) - align = ocm->alignment; - - offset = rh_alloc_align(ocm_reg->rh, size, align, NULL); - - if (IS_ERR_VALUE(offset)) - continue; - - ocm_blk = kzalloc(sizeof(*ocm_blk), GFP_KERNEL); - if (!ocm_blk) { - rh_free(ocm_reg->rh, offset); - break; - } - - *phys = ocm_reg->phys + offset; - addr = ocm_reg->virt + offset; - size = ALIGN(size, align); - - ocm_blk->addr = addr; - ocm_blk->size = size; - ocm_blk->owner = owner; - list_add_tail(&ocm_blk->list, &ocm_reg->list); - - ocm_reg->memfree -= size; - - break; - } - - return addr; -} - -void ppc4xx_ocm_free(const void *addr) -{ - int i; - - if (!addr) - return; - - for (i = 0; i < ocm_count; i++) { - struct ocm_info *ocm = ocm_get_node(i); - - if (!ocm || !ocm->ready) - continue; - - if (ocm_free_region(&ocm->nc, addr) || - ocm_free_region(&ocm->c, addr)) - return; - } -} - -static int __init ppc4xx_ocm_init(void) -{ - struct device_node *np; - int count; - - count = 0; - for_each_compatible_node(np, NULL, "ibm,ocm") - count++; - - if (!count) - return 0; - - ocm_nodes = kzalloc((count * sizeof(struct ocm_info)), GFP_KERNEL); - if (!ocm_nodes) - return -ENOMEM; - - ocm_count = count; - count = 0; - - for_each_compatible_node(np, NULL, "ibm,ocm") { - ocm_init_node(count, np); - count++; - } - - ocm_debugfs_init(); - - return 0; -} - -arch_initcall(ppc4xx_ocm_init); diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index f3fb79fccc721c..d82e3664ffdf86 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig @@ -197,7 +197,8 @@ endmenu config PPC601_SYNC_FIX bool "Workarounds for PPC601 bugs" - depends on PPC_BOOK3S_32 && PPC_PMAC + depends on PPC_BOOK3S_601 && PPC_PMAC + default y help Some versions of the PPC601 (the first PowerPC chip) have bugs which mean that extra synchronization instructions are required near diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 56a7c814160d8e..12543e53fa96a5 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -6,6 +6,9 @@ config PPC64 This option selects whether a 32-bit or a 64-bit kernel will be built. +config PPC_BOOK3S_32 + bool + menu "Processor support" choice prompt "Processor Type" @@ -21,13 +24,20 @@ choice If unsure, select 52xx/6xx/7xx/74xx/82xx/83xx/86xx. 
-config PPC_BOOK3S_32 - bool "512x/52xx/6xx/7xx/74xx/82xx/83xx/86xx" +config PPC_BOOK3S_6xx + bool "512x/52xx/6xx/7xx/74xx/82xx/83xx/86xx except 601" + select PPC_BOOK3S_32 select PPC_FPU select PPC_HAVE_PMU_SUPPORT select PPC_HAVE_KUEP select PPC_HAVE_KUAP +config PPC_BOOK3S_601 + bool "PowerPC 601" + select PPC_BOOK3S_32 + select PPC_FPU + select PPC_HAVE_KUAP + config PPC_85xx bool "Freescale 85xx" select E500 @@ -450,8 +460,10 @@ config NOT_COHERENT_CACHE depends on 4xx || PPC_8xx || E200 || PPC_MPC512x || \ GAMECUBE_COMMON || AMIGAONE select ARCH_HAS_DMA_COHERENT_TO_PFN + select ARCH_HAS_DMA_PREP_COHERENT select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_HAS_SYNC_DMA_FOR_CPU + select DMA_DIRECT_REMAP default n if PPC_47x default y diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index 16dfee29aa41e6..ca9ffc1c8685d1 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -486,7 +486,7 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np, window->table.it_size = size >> window->table.it_page_shift; window->table.it_ops = &cell_iommu_ops; - iommu_init_table(&window->table, iommu->nid); + iommu_init_table(&window->table, iommu->nid, 0, 0); pr_debug("\tioid %d\n", window->ioid); pr_debug("\tblocksize %ld\n", window->table.it_blocksize); diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c index 77fee09104f868..b500a6e47e6b11 100644 --- a/arch/powerpc/platforms/pasemi/iommu.c +++ b/arch/powerpc/platforms/pasemi/iommu.c @@ -146,7 +146,7 @@ static void iommu_table_iobmap_setup(void) */ iommu_table_iobmap.it_blocksize = 4; iommu_table_iobmap.it_ops = &iommu_table_iobmap_ops; - iommu_init_table(&iommu_table_iobmap, 0); + iommu_init_table(&iommu_table_iobmap, 0, 0, 0); pr_debug(" <- %s\n", __func__); } diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig index 850eee860cf21c..938803eab0ad43 100644 --- a/arch/powerpc/platforms/powernv/Kconfig +++ b/arch/powerpc/platforms/powernv/Kconfig @@ -12,7 +12,6 @@ config PPC_POWERNV select EPAPR_BOOT select PPC_INDIRECT_PIO select PPC_UDBG_16550 - select PPC_SCOM select ARCH_RANDOM select CPU_FREQ select PPC_DOORBELL @@ -47,3 +46,7 @@ config PPC_VAS VAS adapters are found in POWER9 based systems. If unsure, say N. 
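The cell and pasemi hunks above grow iommu_init_table() by two arguments, both passed as 0 here. Presumably they bound a range of table entries to reserve (an assumption; the prototype is not shown in this context). A call-site sketch under that assumption, with example_setup_table() as an invented wrapper:

#include <asm/iommu.h>

static void example_setup_table(struct iommu_table *tbl, int nid)
{
	/* Nothing to reserve: matches the old two-argument behaviour. */
	iommu_init_table(tbl, nid, 0, 0);
}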
+ +config SCOM_DEBUGFS + bool "Expose SCOM controllers via debugfs" + depends on DEBUG_FS diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile index da2e99efbd04e6..a3ac9646119d0b 100644 --- a/arch/powerpc/platforms/powernv/Makefile +++ b/arch/powerpc/platforms/powernv/Makefile @@ -4,15 +4,19 @@ obj-y += idle.o opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o obj-y += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o obj-y += opal-msglog.o opal-hmi.o opal-power.o opal-irqchip.o obj-y += opal-kmsg.o opal-powercap.o opal-psr.o opal-sensor-groups.o +obj-y += ultravisor.o obj-$(CONFIG_SMP) += smp.o subcore.o subcore-asm.o +obj-$(CONFIG_FA_DUMP) += opal-fadump.o +obj-$(CONFIG_PRESERVE_FA_DUMP) += opal-fadump.o +obj-$(CONFIG_OPAL_CORE) += opal-core.o obj-$(CONFIG_PCI) += pci.o pci-ioda.o npu-dma.o pci-ioda-tce.o obj-$(CONFIG_CXL_BASE) += pci-cxl.o obj-$(CONFIG_EEH) += eeh-powernv.o -obj-$(CONFIG_PPC_SCOM) += opal-xscom.o obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o obj-$(CONFIG_OPAL_PRD) += opal-prd.o obj-$(CONFIG_PERF_EVENTS) += opal-imc.o obj-$(CONFIG_PPC_MEMTRACE) += memtrace.o obj-$(CONFIG_PPC_VAS) += vas.o vas-window.o vas-debug.o obj-$(CONFIG_OCXL_BASE) += ocxl.o +obj-$(CONFIG_SCOM_DEBUGFS) += opal-xscom.o diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index 620a986209f555..6bc24a47e9ef95 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -34,6 +34,7 @@ #include "powernv.h" #include "pci.h" +#include "../../../../drivers/pci/pci.h" static int eeh_event_irq = -EINVAL; @@ -41,13 +42,10 @@ void pnv_pcibios_bus_add_device(struct pci_dev *pdev) { struct pci_dn *pdn = pci_get_pdn(pdev); - if (!pdev->is_virtfn) + if (eeh_has_flag(EEH_FORCE_DISABLED)) return; - /* - * The following operations will fail if VF's sysfs files - * aren't created or its resources aren't finalized. - */ + dev_dbg(&pdev->dev, "EEH: Setting up device\n"); eeh_add_device_early(pdn); eeh_add_device_late(pdev); eeh_sysfs_add_device(pdev); @@ -199,6 +197,25 @@ PNV_EEH_DBGFS_ENTRY(inbB, 0xE10); #endif /* CONFIG_DEBUG_FS */ +void pnv_eeh_enable_phbs(void) +{ + struct pci_controller *hose; + struct pnv_phb *phb; + + list_for_each_entry(hose, &hose_list, list_node) { + phb = hose->private_data; + /* + * If EEH is enabled, we're going to rely on that. + * Otherwise, we restore to conventional mechanism + * to clear frozen PE during PCI config access. + */ + if (eeh_enabled()) + phb->flags |= PNV_PHB_FLAG_EEH; + else + phb->flags &= ~PNV_PHB_FLAG_EEH; + } +} + /** * pnv_eeh_post_init - EEH platform dependent post initialization * @@ -213,9 +230,7 @@ int pnv_eeh_post_init(void) struct pnv_phb *phb; int ret = 0; - /* Probe devices & build address cache */ - eeh_probe_devices(); - eeh_addr_cache_build(); + eeh_show_enabled(); /* Register OPAL event notifier */ eeh_event_irq = opal_event_request(ilog2(OPAL_EVENT_PCI_ERROR)); @@ -237,19 +252,11 @@ int pnv_eeh_post_init(void) if (!eeh_enabled()) disable_irq(eeh_event_irq); + pnv_eeh_enable_phbs(); + list_for_each_entry(hose, &hose_list, list_node) { phb = hose->private_data; - /* - * If EEH is enabled, we're going to rely on that. - * Otherwise, we restore to conventional mechanism - * to clear frozen PE during PCI config access. 
- */ - if (eeh_enabled()) - phb->flags |= PNV_PHB_FLAG_EEH; - else - phb->flags &= ~PNV_PHB_FLAG_EEH; - /* Create debugfs entries */ #ifdef CONFIG_DEBUG_FS if (phb->has_dbgfs || !phb->dbgfs) @@ -377,6 +384,8 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data) if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA) return NULL; + eeh_edev_dbg(edev, "Probing device\n"); + /* Initialize eeh device */ edev->class_code = pdn->class_code; edev->mode &= 0xFFFFFF00; @@ -402,9 +411,7 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data) /* Create PE */ ret = eeh_add_to_parent_pe(edev); if (ret) { - pr_warn("%s: Can't add PCI dev %04x:%02x:%02x.%01x to parent PE (%x)\n", - __func__, hose->global_number, pdn->busno, - PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn), ret); + eeh_edev_warn(edev, "Failed to add device to PE (code %d)\n", ret); return NULL; } @@ -453,11 +460,17 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data) * Enable EEH explicitly so that we will do EEH check * while accessing I/O stuff */ - eeh_add_flag(EEH_ENABLED); + if (!eeh_has_flag(EEH_ENABLED)) { + enable_irq(eeh_event_irq); + pnv_eeh_enable_phbs(); + eeh_add_flag(EEH_ENABLED); + } /* Save memory bars */ eeh_save_bars(edev); + eeh_edev_dbg(edev, "EEH enabled on device\n"); + return NULL; } @@ -837,7 +850,7 @@ static int __pnv_eeh_bridge_reset(struct pci_dev *dev, int option) int aer = edev ? edev->aer_cap : 0; u32 ctrl; - pr_debug("%s: Reset PCI bus %04x:%02x with option %d\n", + pr_debug("%s: Secondary Reset PCI bus %04x:%02x with option %d\n", __func__, pci_domain_nr(dev->bus), dev->bus->number, option); @@ -895,6 +908,10 @@ static int pnv_eeh_bridge_reset(struct pci_dev *pdev, int option) if (!dn || !of_get_property(dn, "ibm,reset-by-firmware", NULL)) return __pnv_eeh_bridge_reset(pdev, option); + pr_debug("%s: FW reset PCI bus %04x:%02x with option %d\n", + __func__, pci_domain_nr(pdev->bus), + pdev->bus->number, option); + switch (option) { case EEH_RESET_FUNDAMENTAL: scope = OPAL_RESET_PCI_FUNDAMENTAL; @@ -1113,17 +1130,37 @@ static int pnv_eeh_reset(struct eeh_pe *pe, int option) return -EIO; } + if (pci_is_root_bus(bus)) + return pnv_eeh_root_reset(hose, option); + /* - * If dealing with the root bus (or the bus underneath the - * root port), we reset the bus underneath the root port. + * For hot resets try use the generic PCI error recovery reset + * functions. These correctly handles the case where the secondary + * bus is behind a hotplug slot and it will use the slot provided + * reset methods to prevent spurious hotplug events during the reset. * - * The cxl driver depends on this behaviour for bi-modal card - * switching. + * Fundemental resets need to be handled internally to EEH since the + * PCI core doesn't really have a concept of a fundemental reset, + * mainly because there's no standard way to generate one. Only a + * few devices require an FRESET so it should be fine. */ - if (pci_is_root_bus(bus) || - pci_is_root_bus(bus->parent)) - return pnv_eeh_root_reset(hose, option); + if (option != EEH_RESET_FUNDAMENTAL) { + /* + * NB: Skiboot and pnv_eeh_bridge_reset() also no-op the + * de-assert step. It's like the OPAL reset API was + * poorly designed or something... + */ + if (option == EEH_RESET_DEACTIVATE) + return 0; + rc = pci_bus_error_reset(bus->self); + if (!rc) + return 0; + } + + /* otherwise, use the generic bridge reset. 
this might call into FW */ + if (pci_is_root_bus(bus->parent)) + return pnv_eeh_root_reset(hose, option); return pnv_eeh_bridge_reset(bus->self, option); } diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index 09f49eed7fb86b..78599bca66c22e 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -675,7 +675,8 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on) sprs.ptcr = mfspr(SPRN_PTCR); sprs.rpr = mfspr(SPRN_RPR); sprs.tscr = mfspr(SPRN_TSCR); - sprs.ldbar = mfspr(SPRN_LDBAR); + if (!firmware_has_feature(FW_FEATURE_ULTRAVISOR)) + sprs.ldbar = mfspr(SPRN_LDBAR); sprs_saved = true; @@ -789,7 +790,8 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on) mtspr(SPRN_MMCR0, sprs.mmcr0); mtspr(SPRN_MMCR1, sprs.mmcr1); mtspr(SPRN_MMCR2, sprs.mmcr2); - mtspr(SPRN_LDBAR, sprs.ldbar); + if (!firmware_has_feature(FW_FEATURE_ULTRAVISOR)) + mtspr(SPRN_LDBAR, sprs.ldbar); mtspr(SPRN_SPRG3, local_paca->sprg_vdso); diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index c16249d251f129..b95b9e3c4c98e4 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c @@ -89,6 +89,7 @@ struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index) } EXPORT_SYMBOL(pnv_pci_get_npu_dev); +#ifdef CONFIG_IOMMU_API /* * Returns the PE assoicated with the PCI device of the given * NPU. Returns the linked pci device if pci_dev != NULL. @@ -192,106 +193,6 @@ static long pnv_npu_unset_window(struct iommu_table_group *table_group, int num) return 0; } -/* - * Enables 32 bit DMA on NPU. - */ -static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe) -{ - struct pci_dev *gpdev; - struct pnv_ioda_pe *gpe; - int64_t rc; - - /* - * Find the assoicated PCI devices and get the dma window - * information from there. - */ - if (!npe->pdev || !(npe->flags & PNV_IODA_PE_DEV)) - return; - - gpe = get_gpu_pci_dev_and_pe(npe, &gpdev); - if (!gpe) - return; - - rc = pnv_npu_set_window(&npe->table_group, 0, - gpe->table_group.tables[0]); - - /* - * NVLink devices use the same TCE table configuration as - * their parent device so drivers shouldn't be doing DMA - * operations directly on these devices. - */ - set_dma_ops(&npe->pdev->dev, &dma_dummy_ops); -} - -/* - * Enables bypass mode on the NPU. The NPU only supports one - * window per link, so bypass needs to be explicitly enabled or - * disabled. Unlike for a PHB3 bypass and non-bypass modes can't be - * active at the same time. 
- */ -static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe) -{ - struct pnv_phb *phb = npe->phb; - int64_t rc = 0; - phys_addr_t top = memblock_end_of_DRAM(); - - if (phb->type != PNV_PHB_NPU_NVLINK || !npe->pdev) - return -EINVAL; - - rc = pnv_npu_unset_window(&npe->table_group, 0); - if (rc != OPAL_SUCCESS) - return rc; - - /* Enable the bypass window */ - - top = roundup_pow_of_two(top); - dev_info(&npe->pdev->dev, "Enabling bypass for PE %x\n", - npe->pe_number); - rc = opal_pci_map_pe_dma_window_real(phb->opal_id, - npe->pe_number, npe->pe_number, - 0 /* bypass base */, top); - - if (rc == OPAL_SUCCESS) - pnv_pci_ioda2_tce_invalidate_entire(phb, false); - - return rc; -} - -void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass) -{ - int i; - struct pnv_phb *phb; - struct pci_dn *pdn; - struct pnv_ioda_pe *npe; - struct pci_dev *npdev; - - for (i = 0; ; ++i) { - npdev = pnv_pci_get_npu_dev(gpdev, i); - - if (!npdev) - break; - - pdn = pci_get_pdn(npdev); - if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE)) - return; - - phb = pci_bus_to_host(npdev->bus)->private_data; - - /* We only do bypass if it's enabled on the linked device */ - npe = &phb->ioda.pe_array[pdn->pe_number]; - - if (bypass) { - dev_info(&npdev->dev, - "Using 64-bit DMA iommu bypass\n"); - pnv_npu_dma_set_bypass(npe); - } else { - dev_info(&npdev->dev, "Using 32-bit DMA via iommu\n"); - pnv_npu_dma_set_32(npe); - } - } -} - -#ifdef CONFIG_IOMMU_API /* Switch ownership from platform code to external user (e.g. VFIO) */ static void pnv_npu_take_ownership(struct iommu_table_group *table_group) { diff --git a/arch/powerpc/platforms/powernv/opal-call.c b/arch/powerpc/platforms/powernv/opal-call.c index 29ca523c1c7992..a2aa5e433ac84d 100644 --- a/arch/powerpc/platforms/powernv/opal-call.c +++ b/arch/powerpc/platforms/powernv/opal-call.c @@ -257,7 +257,7 @@ OPAL_CALL(opal_xive_set_queue_info, OPAL_XIVE_SET_QUEUE_INFO); OPAL_CALL(opal_xive_donate_page, OPAL_XIVE_DONATE_PAGE); OPAL_CALL(opal_xive_alloc_vp_block, OPAL_XIVE_ALLOCATE_VP_BLOCK); OPAL_CALL(opal_xive_free_vp_block, OPAL_XIVE_FREE_VP_BLOCK); -OPAL_CALL(opal_xive_allocate_irq, OPAL_XIVE_ALLOCATE_IRQ); +OPAL_CALL(opal_xive_allocate_irq_raw, OPAL_XIVE_ALLOCATE_IRQ); OPAL_CALL(opal_xive_free_irq, OPAL_XIVE_FREE_IRQ); OPAL_CALL(opal_xive_get_vp_info, OPAL_XIVE_GET_VP_INFO); OPAL_CALL(opal_xive_set_vp_info, OPAL_XIVE_SET_VP_INFO); @@ -287,3 +287,6 @@ OPAL_CALL(opal_pci_set_pbcq_tunnel_bar, OPAL_PCI_SET_PBCQ_TUNNEL_BAR); OPAL_CALL(opal_sensor_read_u64, OPAL_SENSOR_READ_U64); OPAL_CALL(opal_sensor_group_enable, OPAL_SENSOR_GROUP_ENABLE); OPAL_CALL(opal_nx_coproc_init, OPAL_NX_COPROC_INIT); +OPAL_CALL(opal_mpipl_update, OPAL_MPIPL_UPDATE); +OPAL_CALL(opal_mpipl_register_tag, OPAL_MPIPL_REGISTER_TAG); +OPAL_CALL(opal_mpipl_query_tag, OPAL_MPIPL_QUERY_TAG); diff --git a/arch/powerpc/platforms/powernv/opal-core.c b/arch/powerpc/platforms/powernv/opal-core.c new file mode 100644 index 00000000000000..ed895d82c048de --- /dev/null +++ b/arch/powerpc/platforms/powernv/opal-core.c @@ -0,0 +1,636 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Interface for exporting the OPAL ELF core. + * Heavily inspired from fs/proc/vmcore.c + * + * Copyright 2019, Hari Bathini, IBM Corporation. 
+ */ + +#define pr_fmt(fmt) "opal core: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "opal-fadump.h" + +#define MAX_PT_LOAD_CNT 8 + +/* NT_AUXV note related info */ +#define AUXV_CNT 1 +#define AUXV_DESC_SZ (((2 * AUXV_CNT) + 1) * sizeof(Elf64_Off)) + +struct opalcore_config { + u32 num_cpus; + /* PIR value of crashing CPU */ + u32 crashing_cpu; + + /* CPU state data info from F/W */ + u64 cpu_state_destination_vaddr; + u64 cpu_state_data_size; + u64 cpu_state_entry_size; + + /* OPAL memory to be exported as PT_LOAD segments */ + u64 ptload_addr[MAX_PT_LOAD_CNT]; + u64 ptload_size[MAX_PT_LOAD_CNT]; + u64 ptload_cnt; + + /* Pointer to the first PT_LOAD in the ELF core file */ + Elf64_Phdr *ptload_phdr; + + /* Total size of opalcore file. */ + size_t opalcore_size; + + /* Buffer for all the ELF core headers and the PT_NOTE */ + size_t opalcorebuf_sz; + char *opalcorebuf; + + /* NT_AUXV buffer */ + char auxv_buf[AUXV_DESC_SZ]; +}; + +struct opalcore { + struct list_head list; + u64 paddr; + size_t size; + loff_t offset; +}; + +static LIST_HEAD(opalcore_list); +static struct opalcore_config *oc_conf; +static const struct opal_mpipl_fadump *opalc_metadata; +static const struct opal_mpipl_fadump *opalc_cpu_metadata; + +/* + * Set crashing CPU's signal to SIGUSR1. if the kernel is triggered + * by kernel, SIGTERM otherwise. + */ +bool kernel_initiated; + +static struct opalcore * __init get_new_element(void) +{ + return kzalloc(sizeof(struct opalcore), GFP_KERNEL); +} + +static inline int is_opalcore_usable(void) +{ + return (oc_conf && oc_conf->opalcorebuf != NULL) ? 1 : 0; +} + +static Elf64_Word *append_elf64_note(Elf64_Word *buf, char *name, + u32 type, void *data, + size_t data_len) +{ + Elf64_Nhdr *note = (Elf64_Nhdr *)buf; + Elf64_Word namesz = strlen(name) + 1; + + note->n_namesz = cpu_to_be32(namesz); + note->n_descsz = cpu_to_be32(data_len); + note->n_type = cpu_to_be32(type); + buf += DIV_ROUND_UP(sizeof(*note), sizeof(Elf64_Word)); + memcpy(buf, name, namesz); + buf += DIV_ROUND_UP(namesz, sizeof(Elf64_Word)); + memcpy(buf, data, data_len); + buf += DIV_ROUND_UP(data_len, sizeof(Elf64_Word)); + + return buf; +} + +static void fill_prstatus(struct elf_prstatus *prstatus, int pir, + struct pt_regs *regs) +{ + memset(prstatus, 0, sizeof(struct elf_prstatus)); + elf_core_copy_kernel_regs(&(prstatus->pr_reg), regs); + + /* + * Overload PID with PIR value. + * As a PIR value could also be '0', add an offset of '100' + * to every PIR to avoid misinterpretations in GDB. + */ + prstatus->pr_pid = cpu_to_be32(100 + pir); + prstatus->pr_ppid = cpu_to_be32(1); + + /* + * Indicate SIGUSR1 for crash initiated from kernel. + * SIGTERM otherwise. + */ + if (pir == oc_conf->crashing_cpu) { + short sig; + + sig = kernel_initiated ? SIGUSR1 : SIGTERM; + prstatus->pr_cursig = cpu_to_be16(sig); + } +} + +static Elf64_Word *auxv_to_elf64_notes(Elf64_Word *buf, + u64 opal_boot_entry) +{ + Elf64_Off *bufp = (Elf64_Off *)oc_conf->auxv_buf; + int idx = 0; + + memset(bufp, 0, AUXV_DESC_SZ); + + /* Entry point of OPAL */ + bufp[idx++] = cpu_to_be64(AT_ENTRY); + bufp[idx++] = cpu_to_be64(opal_boot_entry); + + /* end of vector */ + bufp[idx++] = cpu_to_be64(AT_NULL); + + buf = append_elf64_note(buf, CRASH_CORE_NOTE_NAME, NT_AUXV, + oc_conf->auxv_buf, AUXV_DESC_SZ); + return buf; +} + +/* + * Read from the ELF header and then the crash dump. + * Returns number of bytes read on success, -errno on failure. 
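append_elf64_note() above packs an Elf64_Nhdr followed by the note name and descriptor, each padded to 4-byte (Elf64_Word) boundaries. A standalone userspace sketch of that size arithmetic; note_size() and NOTE_ALIGN are made up for the example, the 24-byte descriptor mirrors AUXV_DESC_SZ with AUXV_CNT == 1, and "CORE" is assumed as the note name:

#include <elf.h>
#include <stdio.h>
#include <string.h>

#define NOTE_ALIGN(x) (((x) + 3) & ~(size_t)3)	/* pad to sizeof(Elf64_Word) */

static size_t note_size(const char *name, size_t desc_len)
{
	return sizeof(Elf64_Nhdr) + NOTE_ALIGN(strlen(name) + 1) +
	       NOTE_ALIGN(desc_len);
}

int main(void)
{
	/* One NT_AUXV note: 3 * sizeof(Elf64_Off) == 24 byte descriptor */
	printf("AUXV note occupies %zu bytes\n", note_size("CORE", 24));
	return 0;
}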
+ */ +static ssize_t read_opalcore(struct file *file, struct kobject *kobj, + struct bin_attribute *bin_attr, char *to, + loff_t pos, size_t count) +{ + struct opalcore *m; + ssize_t tsz, avail; + loff_t tpos = pos; + + if (pos >= oc_conf->opalcore_size) + return 0; + + /* Adjust count if it goes beyond opalcore size */ + avail = oc_conf->opalcore_size - pos; + if (count > avail) + count = avail; + + if (count == 0) + return 0; + + /* Read ELF core header and/or PT_NOTE segment */ + if (tpos < oc_conf->opalcorebuf_sz) { + tsz = min_t(size_t, oc_conf->opalcorebuf_sz - tpos, count); + memcpy(to, oc_conf->opalcorebuf + tpos, tsz); + to += tsz; + tpos += tsz; + count -= tsz; + } + + list_for_each_entry(m, &opalcore_list, list) { + /* nothing more to read here */ + if (count == 0) + break; + + if (tpos < m->offset + m->size) { + void *addr; + + tsz = min_t(size_t, m->offset + m->size - tpos, count); + addr = (void *)(m->paddr + tpos - m->offset); + memcpy(to, __va(addr), tsz); + to += tsz; + tpos += tsz; + count -= tsz; + } + } + + return (tpos - pos); +} + +static struct bin_attribute opal_core_attr = { + .attr = {.name = "core", .mode = 0400}, + .read = read_opalcore +}; + +/* + * Read CPU state dump data and convert it into ELF notes. + * + * Each register entry is of 16 bytes, A numerical identifier along with + * a GPR/SPR flag in the first 8 bytes and the register value in the next + * 8 bytes. For more details refer to F/W documentation. + */ +static Elf64_Word * __init opalcore_append_cpu_notes(Elf64_Word *buf) +{ + u32 thread_pir, size_per_thread, regs_offset, regs_cnt, reg_esize; + struct hdat_fadump_thread_hdr *thdr; + struct elf_prstatus prstatus; + Elf64_Word *first_cpu_note; + struct pt_regs regs; + char *bufp; + int i; + + size_per_thread = oc_conf->cpu_state_entry_size; + bufp = __va(oc_conf->cpu_state_destination_vaddr); + + /* + * Offset for register entries, entry size and registers count is + * duplicated in every thread header in keeping with HDAT format. + * Use these values from the first thread header. + */ + thdr = (struct hdat_fadump_thread_hdr *)bufp; + regs_offset = (offsetof(struct hdat_fadump_thread_hdr, offset) + + be32_to_cpu(thdr->offset)); + reg_esize = be32_to_cpu(thdr->esize); + regs_cnt = be32_to_cpu(thdr->ecnt); + + pr_debug("--------CPU State Data------------\n"); + pr_debug("NumCpus : %u\n", oc_conf->num_cpus); + pr_debug("\tOffset: %u, Entry size: %u, Cnt: %u\n", + regs_offset, reg_esize, regs_cnt); + + /* + * Skip past the first CPU note. Fill this note with the + * crashing CPU's prstatus. + */ + first_cpu_note = buf; + buf = append_elf64_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS, + &prstatus, sizeof(prstatus)); + + for (i = 0; i < oc_conf->num_cpus; i++, bufp += size_per_thread) { + thdr = (struct hdat_fadump_thread_hdr *)bufp; + thread_pir = be32_to_cpu(thdr->pir); + + pr_debug("[%04d] PIR: 0x%x, core state: 0x%02x\n", + i, thread_pir, thdr->core_state); + + /* + * Register state data of MAX cores is provided by firmware, + * but some of this cores may not be active. So, while + * processing register state data, check core state and + * skip threads that belong to inactive cores. 
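The comment above describes each saved register as a 16-byte record: a numerical identifier plus GPR/SPR flag in the first 8 bytes, the value in the next 8. A speculative sketch of such a record; the field split and names are assumptions made for illustration, and the real decoding is done by opal_fadump_read_regs():

#include <linux/types.h>

/* Assumed view of one 16-byte register entry (big-endian fields). */
struct fadump_reg_entry_sketch {
	__be32 reg_type;	/* GPR vs SPR discriminator (assumed) */
	__be32 reg_num;		/* register number within that class (assumed) */
	__be64 reg_val;		/* saved register value */
};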
+ */ + if (thdr->core_state == HDAT_FADUMP_CORE_INACTIVE) + continue; + + opal_fadump_read_regs((bufp + regs_offset), regs_cnt, + reg_esize, false, ®s); + + pr_debug("PIR 0x%x - R1 : 0x%llx, NIP : 0x%llx\n", thread_pir, + be64_to_cpu(regs.gpr[1]), be64_to_cpu(regs.nip)); + fill_prstatus(&prstatus, thread_pir, ®s); + + if (thread_pir != oc_conf->crashing_cpu) { + buf = append_elf64_note(buf, CRASH_CORE_NOTE_NAME, + NT_PRSTATUS, &prstatus, + sizeof(prstatus)); + } else { + /* + * Add crashing CPU as the first NT_PRSTATUS note for + * GDB to process the core file appropriately. + */ + append_elf64_note(first_cpu_note, CRASH_CORE_NOTE_NAME, + NT_PRSTATUS, &prstatus, + sizeof(prstatus)); + } + } + + return buf; +} + +static int __init create_opalcore(void) +{ + u64 opal_boot_entry, opal_base_addr, paddr; + u32 hdr_size, cpu_notes_size, count; + struct device_node *dn; + struct opalcore *new; + loff_t opalcore_off; + struct page *page; + Elf64_Phdr *phdr; + Elf64_Ehdr *elf; + int i, ret; + char *bufp; + + /* Get size of header & CPU notes for OPAL core */ + hdr_size = (sizeof(Elf64_Ehdr) + + ((oc_conf->ptload_cnt + 1) * sizeof(Elf64_Phdr))); + cpu_notes_size = ((oc_conf->num_cpus * (CRASH_CORE_NOTE_HEAD_BYTES + + CRASH_CORE_NOTE_NAME_BYTES + + CRASH_CORE_NOTE_DESC_BYTES)) + + (CRASH_CORE_NOTE_HEAD_BYTES + + CRASH_CORE_NOTE_NAME_BYTES + AUXV_DESC_SZ)); + + /* Allocate buffer to setup OPAL core */ + oc_conf->opalcorebuf_sz = PAGE_ALIGN(hdr_size + cpu_notes_size); + oc_conf->opalcorebuf = alloc_pages_exact(oc_conf->opalcorebuf_sz, + GFP_KERNEL | __GFP_ZERO); + if (!oc_conf->opalcorebuf) { + pr_err("Not enough memory to setup OPAL core (size: %lu)\n", + oc_conf->opalcorebuf_sz); + oc_conf->opalcorebuf_sz = 0; + return -ENOMEM; + } + count = oc_conf->opalcorebuf_sz / PAGE_SIZE; + page = virt_to_page(oc_conf->opalcorebuf); + for (i = 0; i < count; i++) + mark_page_reserved(page + i); + + pr_debug("opalcorebuf = 0x%llx\n", (u64)oc_conf->opalcorebuf); + + /* Read OPAL related device-tree entries */ + dn = of_find_node_by_name(NULL, "ibm,opal"); + if (dn) { + ret = of_property_read_u64(dn, "opal-base-address", + &opal_base_addr); + pr_debug("opal-base-address: %llx\n", opal_base_addr); + ret |= of_property_read_u64(dn, "opal-boot-address", + &opal_boot_entry); + pr_debug("opal-boot-address: %llx\n", opal_boot_entry); + } + if (!dn || ret) + pr_warn("WARNING: Failed to read OPAL base & entry values\n"); + + /* Use count to keep track of the program headers */ + count = 0; + + bufp = oc_conf->opalcorebuf; + elf = (Elf64_Ehdr *)bufp; + bufp += sizeof(Elf64_Ehdr); + memcpy(elf->e_ident, ELFMAG, SELFMAG); + elf->e_ident[EI_CLASS] = ELF_CLASS; + elf->e_ident[EI_DATA] = ELFDATA2MSB; + elf->e_ident[EI_VERSION] = EV_CURRENT; + elf->e_ident[EI_OSABI] = ELF_OSABI; + memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD); + elf->e_type = cpu_to_be16(ET_CORE); + elf->e_machine = cpu_to_be16(ELF_ARCH); + elf->e_version = cpu_to_be32(EV_CURRENT); + elf->e_entry = 0; + elf->e_phoff = cpu_to_be64(sizeof(Elf64_Ehdr)); + elf->e_shoff = 0; + elf->e_flags = 0; + + elf->e_ehsize = cpu_to_be16(sizeof(Elf64_Ehdr)); + elf->e_phentsize = cpu_to_be16(sizeof(Elf64_Phdr)); + elf->e_phnum = 0; + elf->e_shentsize = 0; + elf->e_shnum = 0; + elf->e_shstrndx = 0; + + phdr = (Elf64_Phdr *)bufp; + bufp += sizeof(Elf64_Phdr); + phdr->p_type = cpu_to_be32(PT_NOTE); + phdr->p_flags = 0; + phdr->p_align = 0; + phdr->p_paddr = phdr->p_vaddr = 0; + phdr->p_offset = cpu_to_be64(hdr_size); + phdr->p_filesz = phdr->p_memsz = cpu_to_be64(cpu_notes_size); 
+ count++; + + opalcore_off = oc_conf->opalcorebuf_sz; + oc_conf->ptload_phdr = (Elf64_Phdr *)bufp; + paddr = 0; + for (i = 0; i < oc_conf->ptload_cnt; i++) { + phdr = (Elf64_Phdr *)bufp; + bufp += sizeof(Elf64_Phdr); + phdr->p_type = cpu_to_be32(PT_LOAD); + phdr->p_flags = cpu_to_be32(PF_R|PF_W|PF_X); + phdr->p_align = 0; + + new = get_new_element(); + if (!new) + return -ENOMEM; + new->paddr = oc_conf->ptload_addr[i]; + new->size = oc_conf->ptload_size[i]; + new->offset = opalcore_off; + list_add_tail(&new->list, &opalcore_list); + + phdr->p_paddr = cpu_to_be64(paddr); + phdr->p_vaddr = cpu_to_be64(opal_base_addr + paddr); + phdr->p_filesz = phdr->p_memsz = + cpu_to_be64(oc_conf->ptload_size[i]); + phdr->p_offset = cpu_to_be64(opalcore_off); + + count++; + opalcore_off += oc_conf->ptload_size[i]; + paddr += oc_conf->ptload_size[i]; + } + + elf->e_phnum = cpu_to_be16(count); + + bufp = (char *)opalcore_append_cpu_notes((Elf64_Word *)bufp); + bufp = (char *)auxv_to_elf64_notes((Elf64_Word *)bufp, opal_boot_entry); + + oc_conf->opalcore_size = opalcore_off; + return 0; +} + +static void opalcore_cleanup(void) +{ + if (oc_conf == NULL) + return; + + /* Remove OPAL core sysfs file */ + sysfs_remove_bin_file(opal_kobj, &opal_core_attr); + oc_conf->ptload_phdr = NULL; + oc_conf->ptload_cnt = 0; + + /* free the buffer used for setting up OPAL core */ + if (oc_conf->opalcorebuf) { + void *end = (void *)((u64)oc_conf->opalcorebuf + + oc_conf->opalcorebuf_sz); + + free_reserved_area(oc_conf->opalcorebuf, end, -1, NULL); + oc_conf->opalcorebuf = NULL; + oc_conf->opalcorebuf_sz = 0; + } + + kfree(oc_conf); + oc_conf = NULL; +} +__exitcall(opalcore_cleanup); + +static void __init opalcore_config_init(void) +{ + u32 idx, cpu_data_version; + struct device_node *np; + const __be32 *prop; + u64 addr = 0; + int i, ret; + + np = of_find_node_by_path("/ibm,opal/dump"); + if (np == NULL) + return; + + if (!of_device_is_compatible(np, "ibm,opal-dump")) { + pr_warn("Support missing for this f/w version!\n"); + return; + } + + /* Check if dump has been initiated on last reboot */ + prop = of_get_property(np, "mpipl-boot", NULL); + if (!prop) { + of_node_put(np); + return; + } + + /* Get OPAL metadata */ + ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_OPAL, &addr); + if ((ret != OPAL_SUCCESS) || !addr) { + pr_err("Failed to get OPAL metadata (%d)\n", ret); + goto error_out; + } + + addr = be64_to_cpu(addr); + pr_debug("OPAL metadata addr: %llx\n", addr); + opalc_metadata = __va(addr); + + /* Get OPAL CPU metadata */ + ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_CPU, &addr); + if ((ret != OPAL_SUCCESS) || !addr) { + pr_err("Failed to get OPAL CPU metadata (%d)\n", ret); + goto error_out; + } + + addr = be64_to_cpu(addr); + pr_debug("CPU metadata addr: %llx\n", addr); + opalc_cpu_metadata = __va(addr); + + /* Allocate memory for config buffer */ + oc_conf = kzalloc(sizeof(struct opalcore_config), GFP_KERNEL); + if (oc_conf == NULL) + goto error_out; + + /* Parse OPAL metadata */ + if (opalc_metadata->version != OPAL_MPIPL_VERSION) { + pr_warn("Supported OPAL metadata version: %u, found: %u!\n", + OPAL_MPIPL_VERSION, opalc_metadata->version); + pr_warn("WARNING: F/W using newer OPAL metadata format!!\n"); + } + + oc_conf->ptload_cnt = 0; + idx = be32_to_cpu(opalc_metadata->region_cnt); + if (idx > MAX_PT_LOAD_CNT) { + pr_warn("WARNING: OPAL regions count (%d) adjusted to limit (%d)", + MAX_PT_LOAD_CNT, idx); + idx = MAX_PT_LOAD_CNT; + } + for (i = 0; i < idx; i++) { + oc_conf->ptload_addr[oc_conf->ptload_cnt] = + 
be64_to_cpu(opalc_metadata->region[i].dest); + oc_conf->ptload_size[oc_conf->ptload_cnt++] = + be64_to_cpu(opalc_metadata->region[i].size); + } + oc_conf->ptload_cnt = i; + oc_conf->crashing_cpu = be32_to_cpu(opalc_metadata->crashing_pir); + + if (!oc_conf->ptload_cnt) { + pr_err("OPAL memory regions not found\n"); + goto error_out; + } + + /* Parse OPAL CPU metadata */ + cpu_data_version = be32_to_cpu(opalc_cpu_metadata->cpu_data_version); + if (cpu_data_version != HDAT_FADUMP_CPU_DATA_VER) { + pr_warn("Supported CPU data version: %u, found: %u!\n", + HDAT_FADUMP_CPU_DATA_VER, cpu_data_version); + pr_warn("WARNING: F/W using newer CPU state data format!!\n"); + } + + addr = be64_to_cpu(opalc_cpu_metadata->region[0].dest); + if (!addr) { + pr_err("CPU state data not found!\n"); + goto error_out; + } + oc_conf->cpu_state_destination_vaddr = (u64)__va(addr); + + oc_conf->cpu_state_data_size = + be64_to_cpu(opalc_cpu_metadata->region[0].size); + oc_conf->cpu_state_entry_size = + be32_to_cpu(opalc_cpu_metadata->cpu_data_size); + + if ((oc_conf->cpu_state_entry_size == 0) || + (oc_conf->cpu_state_entry_size > oc_conf->cpu_state_data_size)) { + pr_err("CPU state data is invalid.\n"); + goto error_out; + } + oc_conf->num_cpus = (oc_conf->cpu_state_data_size / + oc_conf->cpu_state_entry_size); + + of_node_put(np); + return; + +error_out: + pr_err("Could not export /sys/firmware/opal/core\n"); + opalcore_cleanup(); + of_node_put(np); +} + +static ssize_t fadump_release_opalcore_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int input = -1; + + if (kstrtoint(buf, 0, &input)) + return -EINVAL; + + if (input == 1) { + if (oc_conf == NULL) { + pr_err("'/sys/firmware/opal/core' file not accessible!\n"); + return -EPERM; + } + + /* + * Take away '/sys/firmware/opal/core' and release all memory + * used for exporting this file. + */ + opalcore_cleanup(); + } else + return -EINVAL; + + return count; +} + +static struct kobj_attribute opalcore_rel_attr = __ATTR(fadump_release_opalcore, + 0200, NULL, + fadump_release_opalcore_store); + +static int __init opalcore_init(void) +{ + int rc = -1; + + opalcore_config_init(); + + if (oc_conf == NULL) + return rc; + + create_opalcore(); + + /* + * If oc_conf->opalcorebuf= is set in the 2nd kernel, + * then capture the dump. + */ + if (!(is_opalcore_usable())) { + pr_err("Failed to export /sys/firmware/opal/core\n"); + opalcore_cleanup(); + return rc; + } + + /* Set OPAL core file size */ + opal_core_attr.size = oc_conf->opalcore_size; + + /* Export OPAL core sysfs file */ + rc = sysfs_create_bin_file(opal_kobj, &opal_core_attr); + if (rc != 0) { + pr_err("Failed to export /sys/firmware/opal/core\n"); + opalcore_cleanup(); + return rc; + } + + rc = sysfs_create_file(kernel_kobj, &opalcore_rel_attr.attr); + if (rc) { + pr_warn("unable to create sysfs file fadump_release_opalcore (%d)\n", + rc); + } + + return 0; +} +fs_initcall(opalcore_init); diff --git a/arch/powerpc/platforms/powernv/opal-fadump.c b/arch/powerpc/platforms/powernv/opal-fadump.c new file mode 100644 index 00000000000000..d361d37d975f31 --- /dev/null +++ b/arch/powerpc/platforms/powernv/opal-fadump.c @@ -0,0 +1,716 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Firmware-Assisted Dump support on POWER platform (OPAL). + * + * Copyright 2019, Hari Bathini, IBM Corporation. 
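The opal-core.c code above exposes the firmware crash dump as a binary sysfs file. A standalone userspace sketch that checks the exported file really is an ELF image; only the /sys/firmware/opal/core path comes from the code above, the rest is a minimal example reader:

#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	unsigned char ident[EI_NIDENT];
	int fd = open("/sys/firmware/opal/core", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (read(fd, ident, sizeof(ident)) == (ssize_t)sizeof(ident) &&
	    memcmp(ident, ELFMAG, SELFMAG) == 0)
		printf("found ELF%d core (big endian: %s)\n",
		       ident[EI_CLASS] == ELFCLASS64 ? 64 : 32,
		       ident[EI_DATA] == ELFDATA2MSB ? "yes" : "no");
	close(fd);
	return 0;
}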
+ */ + +#define pr_fmt(fmt) "opal fadump: " fmt + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "opal-fadump.h" + + +#ifdef CONFIG_PRESERVE_FA_DUMP +/* + * When dump is active but PRESERVE_FA_DUMP is enabled on the kernel, + * ensure crash data is preserved in hope that the subsequent memory + * preserving kernel boot is going to process this crash data. + */ +void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) +{ + const struct opal_fadump_mem_struct *opal_fdm_active; + const __be32 *prop; + unsigned long dn; + u64 addr = 0; + s64 ret; + + dn = of_get_flat_dt_subnode_by_name(node, "dump"); + if (dn == -FDT_ERR_NOTFOUND) + return; + + /* + * Check if dump has been initiated on last reboot. + */ + prop = of_get_flat_dt_prop(dn, "mpipl-boot", NULL); + if (!prop) + return; + + ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_KERNEL, &addr); + if ((ret != OPAL_SUCCESS) || !addr) { + pr_debug("Could not get Kernel metadata (%lld)\n", ret); + return; + } + + /* + * Preserve memory only if kernel memory regions are registered + * with f/w for MPIPL. + */ + addr = be64_to_cpu(addr); + pr_debug("Kernel metadata addr: %llx\n", addr); + opal_fdm_active = (void *)addr; + if (opal_fdm_active->registered_regions == 0) + return; + + ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_BOOT_MEM, &addr); + if ((ret != OPAL_SUCCESS) || !addr) { + pr_err("Failed to get boot memory tag (%lld)\n", ret); + return; + } + + /* + * Memory below this address can be used for booting a + * capture kernel or petitboot kernel. Preserve everything + * above this address for processing crashdump. + */ + fadump_conf->boot_mem_top = be64_to_cpu(addr); + pr_debug("Preserve everything above %llx\n", fadump_conf->boot_mem_top); + + pr_info("Firmware-assisted dump is active.\n"); + fadump_conf->dump_active = 1; +} + +#else /* CONFIG_PRESERVE_FA_DUMP */ +static const struct opal_fadump_mem_struct *opal_fdm_active; +static const struct opal_mpipl_fadump *opal_cpu_metadata; +static struct opal_fadump_mem_struct *opal_fdm; + +#ifdef CONFIG_OPAL_CORE +extern bool kernel_initiated; +#endif + +static int opal_fadump_unregister(struct fw_dump *fadump_conf); + +static void opal_fadump_update_config(struct fw_dump *fadump_conf, + const struct opal_fadump_mem_struct *fdm) +{ + pr_debug("Boot memory regions count: %d\n", fdm->region_cnt); + + /* + * The destination address of the first boot memory region is the + * destination address of boot memory regions. + */ + fadump_conf->boot_mem_dest_addr = fdm->rgn[0].dest; + pr_debug("Destination address of boot memory regions: %#016llx\n", + fadump_conf->boot_mem_dest_addr); + + fadump_conf->fadumphdr_addr = fdm->fadumphdr_addr; +} + +/* + * This function is called in the capture kernel to get configuration details + * from metadata setup by the first kernel. 
+ */ +static void opal_fadump_get_config(struct fw_dump *fadump_conf, + const struct opal_fadump_mem_struct *fdm) +{ + unsigned long base, size, last_end, hole_size; + int i; + + if (!fadump_conf->dump_active) + return; + + last_end = 0; + hole_size = 0; + fadump_conf->boot_memory_size = 0; + + pr_debug("Boot memory regions:\n"); + for (i = 0; i < fdm->region_cnt; i++) { + base = fdm->rgn[i].src; + size = fdm->rgn[i].size; + pr_debug("\t[%03d] base: 0x%lx, size: 0x%lx\n", i, base, size); + + fadump_conf->boot_mem_addr[i] = base; + fadump_conf->boot_mem_sz[i] = size; + fadump_conf->boot_memory_size += size; + hole_size += (base - last_end); + + last_end = base + size; + } + + /* + * Start address of reserve dump area (permanent reservation) for + * re-registering FADump after dump capture. + */ + fadump_conf->reserve_dump_area_start = fdm->rgn[0].dest; + + /* + * Rarely, but it can so happen that system crashes before all + * boot memory regions are registered for MPIPL. In such + * cases, warn that the vmcore may not be accurate and proceed + * anyway as that is the best bet considering free pages, cache + * pages, user pages, etc are usually filtered out. + * + * Hope the memory that could not be preserved only has pages + * that are usually filtered out while saving the vmcore. + */ + if (fdm->region_cnt > fdm->registered_regions) { + pr_warn("Not all memory regions were saved!!!\n"); + pr_warn(" Unsaved memory regions:\n"); + i = fdm->registered_regions; + while (i < fdm->region_cnt) { + pr_warn("\t[%03d] base: 0x%llx, size: 0x%llx\n", + i, fdm->rgn[i].src, fdm->rgn[i].size); + i++; + } + + pr_warn("If the unsaved regions only contain pages that are filtered out (eg. free/user pages), the vmcore should still be usable.\n"); + pr_warn("WARNING: If the unsaved regions contain kernel pages, the vmcore will be corrupted.\n"); + } + + fadump_conf->boot_mem_top = (fadump_conf->boot_memory_size + hole_size); + fadump_conf->boot_mem_regs_cnt = fdm->region_cnt; + opal_fadump_update_config(fadump_conf, fdm); +} + +/* Initialize kernel metadata */ +static void opal_fadump_init_metadata(struct opal_fadump_mem_struct *fdm) +{ + fdm->version = OPAL_FADUMP_VERSION; + fdm->region_cnt = 0; + fdm->registered_regions = 0; + fdm->fadumphdr_addr = 0; +} + +static u64 opal_fadump_init_mem_struct(struct fw_dump *fadump_conf) +{ + u64 addr = fadump_conf->reserve_dump_area_start; + int i; + + opal_fdm = __va(fadump_conf->kernel_metadata); + opal_fadump_init_metadata(opal_fdm); + + /* Boot memory regions */ + for (i = 0; i < fadump_conf->boot_mem_regs_cnt; i++) { + opal_fdm->rgn[i].src = fadump_conf->boot_mem_addr[i]; + opal_fdm->rgn[i].dest = addr; + opal_fdm->rgn[i].size = fadump_conf->boot_mem_sz[i]; + + opal_fdm->region_cnt++; + addr += fadump_conf->boot_mem_sz[i]; + } + + /* + * Kernel metadata is passed to f/w and retrieved in capture kerenl. + * So, use it to save fadump header address instead of calculating it. + */ + opal_fdm->fadumphdr_addr = (opal_fdm->rgn[0].dest + + fadump_conf->boot_memory_size); + + opal_fadump_update_config(fadump_conf, opal_fdm); + + return addr; +} + +static u64 opal_fadump_get_metadata_size(void) +{ + return PAGE_ALIGN(sizeof(struct opal_fadump_mem_struct)); +} + +static int opal_fadump_setup_metadata(struct fw_dump *fadump_conf) +{ + int err = 0; + s64 ret; + + /* + * Use the last page(s) in FADump memory reservation for + * kernel metadata. 
+ */ + fadump_conf->kernel_metadata = (fadump_conf->reserve_dump_area_start + + fadump_conf->reserve_dump_area_size - + opal_fadump_get_metadata_size()); + pr_info("Kernel metadata addr: %llx\n", fadump_conf->kernel_metadata); + + /* Initialize kernel metadata before registering the address with f/w */ + opal_fdm = __va(fadump_conf->kernel_metadata); + opal_fadump_init_metadata(opal_fdm); + + /* + * Register metadata address with f/w. Can be retrieved in + * the capture kernel. + */ + ret = opal_mpipl_register_tag(OPAL_MPIPL_TAG_KERNEL, + fadump_conf->kernel_metadata); + if (ret != OPAL_SUCCESS) { + pr_err("Failed to set kernel metadata tag!\n"); + err = -EPERM; + } + + /* + * Register boot memory top address with f/w. Should be retrieved + * by a kernel that intends to preserve crash'ed kernel's memory. + */ + ret = opal_mpipl_register_tag(OPAL_MPIPL_TAG_BOOT_MEM, + fadump_conf->boot_mem_top); + if (ret != OPAL_SUCCESS) { + pr_err("Failed to set boot memory tag!\n"); + err = -EPERM; + } + + return err; +} + +static u64 opal_fadump_get_bootmem_min(void) +{ + return OPAL_FADUMP_MIN_BOOT_MEM; +} + +static int opal_fadump_register(struct fw_dump *fadump_conf) +{ + s64 rc = OPAL_PARAMETER; + int i, err = -EIO; + + for (i = 0; i < opal_fdm->region_cnt; i++) { + rc = opal_mpipl_update(OPAL_MPIPL_ADD_RANGE, + opal_fdm->rgn[i].src, + opal_fdm->rgn[i].dest, + opal_fdm->rgn[i].size); + if (rc != OPAL_SUCCESS) + break; + + opal_fdm->registered_regions++; + } + + switch (rc) { + case OPAL_SUCCESS: + pr_info("Registration is successful!\n"); + fadump_conf->dump_registered = 1; + err = 0; + break; + case OPAL_RESOURCE: + /* If MAX regions limit in f/w is hit, warn and proceed. */ + pr_warn("%d regions could not be registered for MPIPL as MAX limit is reached!\n", + (opal_fdm->region_cnt - opal_fdm->registered_regions)); + fadump_conf->dump_registered = 1; + err = 0; + break; + case OPAL_PARAMETER: + pr_err("Failed to register. Parameter Error(%lld).\n", rc); + break; + case OPAL_HARDWARE: + pr_err("Support not available.\n"); + fadump_conf->fadump_supported = 0; + fadump_conf->fadump_enabled = 0; + break; + default: + pr_err("Failed to register. Unknown Error(%lld).\n", rc); + break; + } + + /* + * If some regions were registered before OPAL_MPIPL_ADD_RANGE + * OPAL call failed, unregister all regions. + */ + if ((err < 0) && (opal_fdm->registered_regions > 0)) + opal_fadump_unregister(fadump_conf); + + return err; +} + +static int opal_fadump_unregister(struct fw_dump *fadump_conf) +{ + s64 rc; + + rc = opal_mpipl_update(OPAL_MPIPL_REMOVE_ALL, 0, 0, 0); + if (rc) { + pr_err("Failed to un-register - unexpected Error(%lld).\n", rc); + return -EIO; + } + + opal_fdm->registered_regions = 0; + fadump_conf->dump_registered = 0; + return 0; +} + +static int opal_fadump_invalidate(struct fw_dump *fadump_conf) +{ + s64 rc; + + rc = opal_mpipl_update(OPAL_MPIPL_FREE_PRESERVED_MEMORY, 0, 0, 0); + if (rc) { + pr_err("Failed to invalidate - unexpected Error(%lld).\n", rc); + return -EIO; + } + + fadump_conf->dump_active = 0; + opal_fdm_active = NULL; + return 0; +} + +static void opal_fadump_cleanup(struct fw_dump *fadump_conf) +{ + s64 ret; + + ret = opal_mpipl_register_tag(OPAL_MPIPL_TAG_KERNEL, 0); + if (ret != OPAL_SUCCESS) + pr_warn("Could not reset (%llu) kernel metadata tag!\n", ret); +} + +/* + * Verify if CPU state data is available. If available, do a bit of sanity + * checking before processing this data. 
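+ * The checks cover the CPU state data format version, and ensure the
+ * destination address, per-thread entry size and overall data size
+ * reported by firmware are sane before the data is used to build ELF notes.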
+ */ +static bool __init is_opal_fadump_cpu_data_valid(struct fw_dump *fadump_conf) +{ + if (!opal_cpu_metadata) + return false; + + fadump_conf->cpu_state_data_version = + be32_to_cpu(opal_cpu_metadata->cpu_data_version); + fadump_conf->cpu_state_entry_size = + be32_to_cpu(opal_cpu_metadata->cpu_data_size); + fadump_conf->cpu_state_dest_vaddr = + (u64)__va(be64_to_cpu(opal_cpu_metadata->region[0].dest)); + fadump_conf->cpu_state_data_size = + be64_to_cpu(opal_cpu_metadata->region[0].size); + + if (fadump_conf->cpu_state_data_version != HDAT_FADUMP_CPU_DATA_VER) { + pr_warn("Supported CPU state data version: %u, found: %d!\n", + HDAT_FADUMP_CPU_DATA_VER, + fadump_conf->cpu_state_data_version); + pr_warn("WARNING: F/W using newer CPU state data format!!\n"); + } + + if ((fadump_conf->cpu_state_dest_vaddr == 0) || + (fadump_conf->cpu_state_entry_size == 0) || + (fadump_conf->cpu_state_entry_size > + fadump_conf->cpu_state_data_size)) { + pr_err("CPU state data is invalid. Ignoring!\n"); + return false; + } + + return true; +} + +/* + * Convert CPU state data saved at the time of crash into ELF notes. + * + * While the crashing CPU's register data is saved by the kernel, CPU state + * data for all CPUs is saved by f/w. In CPU state data provided by f/w, + * each register entry is of 16 bytes, a numerical identifier along with + * a GPR/SPR flag in the first 8 bytes and the register value in the next + * 8 bytes. For more details refer to F/W documentation. If this data is + * missing or in unsupported format, append crashing CPU's register data + * saved by the kernel in the PT_NOTE, to have something to work with in + * the vmcore file. + */ +static int __init +opal_fadump_build_cpu_notes(struct fw_dump *fadump_conf, + struct fadump_crash_info_header *fdh) +{ + u32 thread_pir, size_per_thread, regs_offset, regs_cnt, reg_esize; + struct hdat_fadump_thread_hdr *thdr; + bool is_cpu_data_valid = false; + u32 num_cpus = 1, *note_buf; + struct pt_regs regs; + char *bufp; + int rc, i; + + if (is_opal_fadump_cpu_data_valid(fadump_conf)) { + size_per_thread = fadump_conf->cpu_state_entry_size; + num_cpus = (fadump_conf->cpu_state_data_size / size_per_thread); + bufp = __va(fadump_conf->cpu_state_dest_vaddr); + is_cpu_data_valid = true; + } + + rc = fadump_setup_cpu_notes_buf(num_cpus); + if (rc != 0) + return rc; + + note_buf = (u32 *)fadump_conf->cpu_notes_buf_vaddr; + if (!is_cpu_data_valid) + goto out; + + /* + * Offset for register entries, entry size and registers count is + * duplicated in every thread header in keeping with HDAT format. + * Use these values from the first thread header. + */ + thdr = (struct hdat_fadump_thread_hdr *)bufp; + regs_offset = (offsetof(struct hdat_fadump_thread_hdr, offset) + + be32_to_cpu(thdr->offset)); + reg_esize = be32_to_cpu(thdr->esize); + regs_cnt = be32_to_cpu(thdr->ecnt); + + pr_debug("--------CPU State Data------------\n"); + pr_debug("NumCpus : %u\n", num_cpus); + pr_debug("\tOffset: %u, Entry size: %u, Cnt: %u\n", + regs_offset, reg_esize, regs_cnt); + + for (i = 0; i < num_cpus; i++, bufp += size_per_thread) { + thdr = (struct hdat_fadump_thread_hdr *)bufp; + + thread_pir = be32_to_cpu(thdr->pir); + pr_debug("[%04d] PIR: 0x%x, core state: 0x%02x\n", + i, thread_pir, thdr->core_state); + + /* + * If this is kernel initiated crash, crashing_cpu would be set + * appropriately and register data of the crashing CPU saved by + * crashing kernel. 
Add this saved register data of crashing CPU
+		 * to elf notes and populate the pt_regs for the remaining CPUs
+		 * from register state data provided by firmware.
+		 */
+		if (fdh->crashing_cpu == thread_pir) {
+			note_buf = fadump_regs_to_elf_notes(note_buf,
+							    &fdh->regs);
+			pr_debug("Crashing CPU PIR: 0x%x - R1 : 0x%lx, NIP : 0x%lx\n",
+				 fdh->crashing_cpu, fdh->regs.gpr[1],
+				 fdh->regs.nip);
+			continue;
+		}
+
+		/*
+		 * Register state data of MAX cores is provided by firmware,
+		 * but some of these cores may not be active. So, while
+		 * processing register state data, check core state and
+		 * skip threads that belong to inactive cores.
+		 */
+		if (thdr->core_state == HDAT_FADUMP_CORE_INACTIVE)
+			continue;
+
+		opal_fadump_read_regs((bufp + regs_offset), regs_cnt,
+				      reg_esize, true, &regs);
+		note_buf = fadump_regs_to_elf_notes(note_buf, &regs);
+		pr_debug("CPU PIR: 0x%x - R1 : 0x%lx, NIP : 0x%lx\n",
+			 thread_pir, regs.gpr[1], regs.nip);
+	}
+
+out:
+	/*
+	 * CPU state data is invalid/unsupported. Try appending crashing CPU's
+	 * register data, if it is saved by the kernel.
+	 */
+	if (fadump_conf->cpu_notes_buf_vaddr == (u64)note_buf) {
+		if (fdh->crashing_cpu == FADUMP_CPU_UNKNOWN) {
+			fadump_free_cpu_notes_buf();
+			return -ENODEV;
+		}
+
+		pr_warn("WARNING: appending only crashing CPU's register data\n");
+		note_buf = fadump_regs_to_elf_notes(note_buf, &(fdh->regs));
+	}
+
+	final_note(note_buf);
+
+	pr_debug("Updating elfcore header (%llx) with cpu notes\n",
+		 fdh->elfcorehdr_addr);
+	fadump_update_elfcore_header(__va(fdh->elfcorehdr_addr));
+	return 0;
+}
+
+static int __init opal_fadump_process(struct fw_dump *fadump_conf)
+{
+	struct fadump_crash_info_header *fdh;
+	int rc = -EINVAL;
+
+	if (!opal_fdm_active || !fadump_conf->fadumphdr_addr)
+		return rc;
+
+	/* Validate the fadump crash info header */
+	fdh = __va(fadump_conf->fadumphdr_addr);
+	if (fdh->magic_number != FADUMP_CRASH_INFO_MAGIC) {
+		pr_err("Crash info header is not valid.\n");
+		return rc;
+	}
+
+#ifdef CONFIG_OPAL_CORE
+	/*
+	 * If this is a kernel initiated crash, crashing_cpu would be set
+	 * appropriately and register data of the crashing CPU saved by
+	 * crashing kernel. Add this saved register data of crashing CPU
+	 * to elf notes and populate the pt_regs for the remaining CPUs
+	 * from register state data provided by firmware.
+	 */
+	if (fdh->crashing_cpu != FADUMP_CPU_UNKNOWN)
+		kernel_initiated = true;
+#endif
+
+	rc = opal_fadump_build_cpu_notes(fadump_conf, fdh);
+	if (rc)
+		return rc;
+
+	/*
+	 * We are done validating dump info and elfcore header is now ready
+	 * to be exported. Set elfcorehdr_addr so that vmcore module will
+	 * export the elfcore header through '/proc/vmcore'.
+	 */
+	elfcorehdr_addr = fdh->elfcorehdr_addr;
+
+	return rc;
+}
+
+static void opal_fadump_region_show(struct fw_dump *fadump_conf,
+				    struct seq_file *m)
+{
+	const struct opal_fadump_mem_struct *fdm_ptr;
+	u64 dumped_bytes = 0;
+	int i;
+
+	if (fadump_conf->dump_active)
+		fdm_ptr = opal_fdm_active;
+	else
+		fdm_ptr = opal_fdm;
+
+	for (i = 0; i < fdm_ptr->region_cnt; i++) {
+		/*
+		 * Only regions that are registered for MPIPL
+		 * would have dump data.
+		 */
+		if ((fadump_conf->dump_active) &&
+		    (i < fdm_ptr->registered_regions))
+			dumped_bytes = fdm_ptr->rgn[i].size;
+
+		seq_printf(m, "DUMP: Src: %#016llx, Dest: %#016llx, ",
+			   fdm_ptr->rgn[i].src, fdm_ptr->rgn[i].dest);
+		seq_printf(m, "Size: %#llx, Dumped: %#llx bytes\n",
+			   fdm_ptr->rgn[i].size, dumped_bytes);
+	}
+
+	/* Dump is active. Show reserved area start address.
*/ + if (fadump_conf->dump_active) { + seq_printf(m, "\nMemory above %#016lx is reserved for saving crash dump\n", + fadump_conf->reserve_dump_area_start); + } +} + +static void opal_fadump_trigger(struct fadump_crash_info_header *fdh, + const char *msg) +{ + int rc; + + /* + * Unlike on pSeries platform, logical CPU number is not provided + * with architected register state data. So, store the crashing + * CPU's PIR instead to plug the appropriate register data for + * crashing CPU in the vmcore file. + */ + fdh->crashing_cpu = (u32)mfspr(SPRN_PIR); + + rc = opal_cec_reboot2(OPAL_REBOOT_MPIPL, msg); + if (rc == OPAL_UNSUPPORTED) { + pr_emerg("Reboot type %d not supported.\n", + OPAL_REBOOT_MPIPL); + } else if (rc == OPAL_HARDWARE) + pr_emerg("No backend support for MPIPL!\n"); +} + +static struct fadump_ops opal_fadump_ops = { + .fadump_init_mem_struct = opal_fadump_init_mem_struct, + .fadump_get_metadata_size = opal_fadump_get_metadata_size, + .fadump_setup_metadata = opal_fadump_setup_metadata, + .fadump_get_bootmem_min = opal_fadump_get_bootmem_min, + .fadump_register = opal_fadump_register, + .fadump_unregister = opal_fadump_unregister, + .fadump_invalidate = opal_fadump_invalidate, + .fadump_cleanup = opal_fadump_cleanup, + .fadump_process = opal_fadump_process, + .fadump_region_show = opal_fadump_region_show, + .fadump_trigger = opal_fadump_trigger, +}; + +void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) +{ + const __be32 *prop; + unsigned long dn; + u64 addr = 0; + int i, len; + s64 ret; + + /* + * Check if Firmware-Assisted Dump is supported. if yes, check + * if dump has been initiated on last reboot. + */ + dn = of_get_flat_dt_subnode_by_name(node, "dump"); + if (dn == -FDT_ERR_NOTFOUND) { + pr_debug("FADump support is missing!\n"); + return; + } + + if (!of_flat_dt_is_compatible(dn, "ibm,opal-dump")) { + pr_err("Support missing for this f/w version!\n"); + return; + } + + prop = of_get_flat_dt_prop(dn, "fw-load-area", &len); + if (prop) { + /* + * Each f/w load area is an (address,size) pair, + * 2 cells each, totalling 4 cells per range. + */ + for (i = 0; i < len / (sizeof(*prop) * 4); i++) { + u64 base, end; + + base = of_read_number(prop + (i * 4) + 0, 2); + end = base; + end += of_read_number(prop + (i * 4) + 2, 2); + if (end > OPAL_FADUMP_MIN_BOOT_MEM) { + pr_err("F/W load area: 0x%llx-0x%llx\n", + base, end); + pr_err("F/W version not supported!\n"); + return; + } + } + } + + fadump_conf->ops = &opal_fadump_ops; + fadump_conf->fadump_supported = 1; + + /* + * Firmware supports 32-bit field for size. Align it to PAGE_SIZE + * and request firmware to copy multiple kernel boot memory regions. + */ + fadump_conf->max_copy_size = _ALIGN_DOWN(U32_MAX, PAGE_SIZE); + + /* + * Check if dump has been initiated on last reboot. + */ + prop = of_get_flat_dt_prop(dn, "mpipl-boot", NULL); + if (!prop) + return; + + ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_KERNEL, &addr); + if ((ret != OPAL_SUCCESS) || !addr) { + pr_err("Failed to get Kernel metadata (%lld)\n", ret); + return; + } + + addr = be64_to_cpu(addr); + pr_debug("Kernel metadata addr: %llx\n", addr); + + opal_fdm_active = __va(addr); + if (opal_fdm_active->version != OPAL_FADUMP_VERSION) { + pr_warn("Supported kernel metadata version: %u, found: %d!\n", + OPAL_FADUMP_VERSION, opal_fdm_active->version); + pr_warn("WARNING: Kernel metadata format mismatch identified! 
Core file maybe corrupted..\n"); + } + + /* Kernel regions not registered with f/w for MPIPL */ + if (opal_fdm_active->registered_regions == 0) { + opal_fdm_active = NULL; + return; + } + + ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_CPU, &addr); + if (addr) { + addr = be64_to_cpu(addr); + pr_debug("CPU metadata addr: %llx\n", addr); + opal_cpu_metadata = __va(addr); + } + + pr_info("Firmware-assisted dump is active.\n"); + fadump_conf->dump_active = 1; + opal_fadump_get_config(fadump_conf, opal_fdm_active); +} +#endif /* !CONFIG_PRESERVE_FA_DUMP */ diff --git a/arch/powerpc/platforms/powernv/opal-fadump.h b/arch/powerpc/platforms/powernv/opal-fadump.h new file mode 100644 index 00000000000000..f1e9ecf548c5d1 --- /dev/null +++ b/arch/powerpc/platforms/powernv/opal-fadump.h @@ -0,0 +1,146 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Firmware-Assisted Dump support on POWER platform (OPAL). + * + * Copyright 2019, Hari Bathini, IBM Corporation. + */ + +#ifndef _POWERNV_OPAL_FADUMP_H +#define _POWERNV_OPAL_FADUMP_H + +#include + +/* + * With kernel & initrd loaded at 512MB (with 256MB size), enforce a minimum + * boot memory size of 768MB to ensure f/w loading kernel and initrd doesn't + * mess with crash'ed kernel's memory during MPIPL. + */ +#define OPAL_FADUMP_MIN_BOOT_MEM (0x30000000UL) + +/* + * OPAL FADump metadata structure format version + * + * OPAL FADump kernel metadata structure stores kernel metadata needed to + * register-for/process crash dump. Format version is used to keep a tab on + * the changes in the structure format. The changes, if any, to the format + * are expected to be minimal and backward compatible. + */ +#define OPAL_FADUMP_VERSION 0x1 + +/* + * OPAL FADump kernel metadata + * + * The address of this structure will be registered with f/w for retrieving + * and processing during crash dump. + */ +struct opal_fadump_mem_struct { + u8 version; + u8 reserved[3]; + u16 region_cnt; /* number of regions */ + u16 registered_regions; /* Regions registered for MPIPL */ + u64 fadumphdr_addr; + struct opal_mpipl_region rgn[FADUMP_MAX_MEM_REGS]; +} __packed; + +/* + * CPU state data + * + * CPU state data information is provided by f/w. The format for this data + * is defined in the HDAT spec. Version is used to keep a tab on the changes + * in this CPU state data format. Changes to this format are unlikely, but + * if there are any changes, please refer to latest HDAT specification. + */ +#define HDAT_FADUMP_CPU_DATA_VER 1 + +#define HDAT_FADUMP_CORE_INACTIVE (0x0F) + +/* HDAT thread header for register entries */ +struct hdat_fadump_thread_hdr { + __be32 pir; + /* 0x00 - 0x0F - The corresponding stop state of the core */ + u8 core_state; + u8 reserved[3]; + + __be32 offset; /* Offset to Register Entries array */ + __be32 ecnt; /* Number of entries */ + __be32 esize; /* Alloc size of each array entry in bytes */ + __be32 eactsz; /* Actual size of each array entry in bytes */ +} __packed; + +/* Register types populated by f/w */ +#define HDAT_FADUMP_REG_TYPE_GPR 0x01 +#define HDAT_FADUMP_REG_TYPE_SPR 0x02 + +/* ID numbers used by f/w while populating certain registers */ +#define HDAT_FADUMP_REG_ID_NIP 0x7D0 +#define HDAT_FADUMP_REG_ID_MSR 0x7D1 +#define HDAT_FADUMP_REG_ID_CCR 0x7D2 + +/* HDAT register entry. 
*/ +struct hdat_fadump_reg_entry { + __be32 reg_type; + __be32 reg_num; + __be64 reg_val; +} __packed; + +static inline void opal_fadump_set_regval_regnum(struct pt_regs *regs, + u32 reg_type, u32 reg_num, + u64 reg_val) +{ + if (reg_type == HDAT_FADUMP_REG_TYPE_GPR) { + if (reg_num < 32) + regs->gpr[reg_num] = reg_val; + return; + } + + switch (reg_num) { + case SPRN_CTR: + regs->ctr = reg_val; + break; + case SPRN_LR: + regs->link = reg_val; + break; + case SPRN_XER: + regs->xer = reg_val; + break; + case SPRN_DAR: + regs->dar = reg_val; + break; + case SPRN_DSISR: + regs->dsisr = reg_val; + break; + case HDAT_FADUMP_REG_ID_NIP: + regs->nip = reg_val; + break; + case HDAT_FADUMP_REG_ID_MSR: + regs->msr = reg_val; + break; + case HDAT_FADUMP_REG_ID_CCR: + regs->ccr = reg_val; + break; + } +} + +static inline void opal_fadump_read_regs(char *bufp, unsigned int regs_cnt, + unsigned int reg_entry_size, + bool cpu_endian, + struct pt_regs *regs) +{ + struct hdat_fadump_reg_entry *reg_entry; + u64 val; + int i; + + memset(regs, 0, sizeof(struct pt_regs)); + + for (i = 0; i < regs_cnt; i++, bufp += reg_entry_size) { + reg_entry = (struct hdat_fadump_reg_entry *)bufp; + val = (cpu_endian ? be64_to_cpu(reg_entry->reg_val) : + reg_entry->reg_val); + opal_fadump_set_regval_regnum(regs, + be32_to_cpu(reg_entry->reg_type), + be32_to_cpu(reg_entry->reg_num), + val); + } +} + +#endif /* _POWERNV_OPAL_FADUMP_H */ diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c index 186109bdd41be2..e04b20625cb949 100644 --- a/arch/powerpc/platforms/powernv/opal-imc.c +++ b/arch/powerpc/platforms/powernv/opal-imc.c @@ -53,9 +53,9 @@ static void export_imc_mode_and_cmd(struct device_node *node, struct imc_pmu *pmu_ptr) { static u64 loc, *imc_mode_addr, *imc_cmd_addr; - int chip = 0, nid; char mode[16], cmd[16]; u32 cb_offset; + struct imc_mem_info *ptr = pmu_ptr->mem_info; imc_debugfs_parent = debugfs_create_dir("imc", powerpc_debugfs_root); @@ -69,20 +69,20 @@ static void export_imc_mode_and_cmd(struct device_node *node, if (of_property_read_u32(node, "cb_offset", &cb_offset)) cb_offset = IMC_CNTL_BLK_OFFSET; - for_each_node(nid) { - loc = (u64)(pmu_ptr->mem_info[chip].vbase) + cb_offset; + while (ptr->vbase != NULL) { + loc = (u64)(ptr->vbase) + cb_offset; imc_mode_addr = (u64 *)(loc + IMC_CNTL_BLK_MODE_OFFSET); - sprintf(mode, "imc_mode_%d", nid); + sprintf(mode, "imc_mode_%d", (u32)(ptr->id)); if (!imc_debugfs_create_x64(mode, 0600, imc_debugfs_parent, imc_mode_addr)) goto err; imc_cmd_addr = (u64 *)(loc + IMC_CNTL_BLK_CMD_OFFSET); - sprintf(cmd, "imc_cmd_%d", nid); + sprintf(cmd, "imc_cmd_%d", (u32)(ptr->id)); if (!imc_debugfs_create_x64(cmd, 0600, imc_debugfs_parent, imc_cmd_addr)) goto err; - chip++; + ptr++; } return; diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c index dc51d03c63709f..d26da19a611f1d 100644 --- a/arch/powerpc/platforms/powernv/opal-msglog.c +++ b/arch/powerpc/platforms/powernv/opal-msglog.c @@ -29,23 +29,23 @@ struct memcons { static struct memcons *opal_memcons = NULL; -ssize_t opal_msglog_copy(char *to, loff_t pos, size_t count) +ssize_t memcons_copy(struct memcons *mc, char *to, loff_t pos, size_t count) { const char *conbuf; ssize_t ret; size_t first_read = 0; uint32_t out_pos, avail; - if (!opal_memcons) + if (!mc) return -ENODEV; - out_pos = be32_to_cpu(READ_ONCE(opal_memcons->out_pos)); + out_pos = be32_to_cpu(READ_ONCE(mc->out_pos)); /* Now we've read out_pos, put a barrier in 
before reading the new * data it points to in conbuf. */ smp_rmb(); - conbuf = phys_to_virt(be64_to_cpu(opal_memcons->obuf_phys)); + conbuf = phys_to_virt(be64_to_cpu(mc->obuf_phys)); /* When the buffer has wrapped, read from the out_pos marker to the end * of the buffer, and then read the remaining data as in the un-wrapped @@ -53,7 +53,7 @@ ssize_t opal_msglog_copy(char *to, loff_t pos, size_t count) if (out_pos & MEMCONS_OUT_POS_WRAP) { out_pos &= MEMCONS_OUT_POS_MASK; - avail = be32_to_cpu(opal_memcons->obuf_size) - out_pos; + avail = be32_to_cpu(mc->obuf_size) - out_pos; ret = memory_read_from_buffer(to, count, &pos, conbuf + out_pos, avail); @@ -71,7 +71,7 @@ ssize_t opal_msglog_copy(char *to, loff_t pos, size_t count) } /* Sanity check. The firmware should not do this to us. */ - if (out_pos > be32_to_cpu(opal_memcons->obuf_size)) { + if (out_pos > be32_to_cpu(mc->obuf_size)) { pr_err("OPAL: memory console corruption. Aborting read.\n"); return -EINVAL; } @@ -86,6 +86,11 @@ ssize_t opal_msglog_copy(char *to, loff_t pos, size_t count) return ret; } +ssize_t opal_msglog_copy(char *to, loff_t pos, size_t count) +{ + return memcons_copy(opal_memcons, to, pos, count); +} + static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj, struct bin_attribute *bin_attr, char *to, loff_t pos, size_t count) @@ -98,32 +103,48 @@ static struct bin_attribute opal_msglog_attr = { .read = opal_msglog_read }; -void __init opal_msglog_init(void) +struct memcons *memcons_init(struct device_node *node, const char *mc_prop_name) { u64 mcaddr; struct memcons *mc; - if (of_property_read_u64(opal_node, "ibm,opal-memcons", &mcaddr)) { - pr_warn("OPAL: Property ibm,opal-memcons not found, no message log\n"); - return; + if (of_property_read_u64(node, mc_prop_name, &mcaddr)) { + pr_warn("%s property not found, no message log\n", + mc_prop_name); + goto out_err; } mc = phys_to_virt(mcaddr); if (!mc) { - pr_warn("OPAL: memory console address is invalid\n"); - return; + pr_warn("memory console address is invalid\n"); + goto out_err; } if (be64_to_cpu(mc->magic) != MEMCONS_MAGIC) { - pr_warn("OPAL: memory console version is invalid\n"); - return; + pr_warn("memory console version is invalid\n"); + goto out_err; } - /* Report maximum size */ - opal_msglog_attr.size = be32_to_cpu(mc->ibuf_size) + - be32_to_cpu(mc->obuf_size); + return mc; + +out_err: + return NULL; +} + +u32 memcons_get_size(struct memcons *mc) +{ + return be32_to_cpu(mc->ibuf_size) + be32_to_cpu(mc->obuf_size); +} + +void __init opal_msglog_init(void) +{ + opal_memcons = memcons_init(opal_node, "ibm,opal-memcons"); + if (!opal_memcons) { + pr_warn("OPAL: memcons failed to load from ibm,opal-memcons\n"); + return; + } - opal_memcons = mc; + opal_msglog_attr.size = memcons_get_size(opal_memcons); } void __init opal_msglog_sysfs_init(void) diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c index e072bf157d6285..45f4223a790f2a 100644 --- a/arch/powerpc/platforms/powernv/opal-prd.c +++ b/arch/powerpc/platforms/powernv/opal-prd.c @@ -342,7 +342,7 @@ static int opal_prd_msg_notifier(struct notifier_block *nb, int msg_size, item_size; unsigned long flags; - if (msg_type != OPAL_MSG_PRD) + if (msg_type != OPAL_MSG_PRD && msg_type != OPAL_MSG_PRD2) return 0; /* Calculate total size of the message and item we need to store. 
The @@ -393,6 +393,12 @@ static int opal_prd_probe(struct platform_device *pdev) return rc; } + rc = opal_message_notifier_register(OPAL_MSG_PRD2, &opal_prd_event_nb); + if (rc) { + pr_err("Couldn't register PRD2 event notifier\n"); + return rc; + } + rc = misc_register(&opal_prd_dev); if (rc) { pr_err("failed to register miscdev\n"); diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c index 66430eebe869a3..fd510d961b8c7c 100644 --- a/arch/powerpc/platforms/powernv/opal-xscom.c +++ b/arch/powerpc/platforms/powernv/opal-xscom.c @@ -1,7 +1,10 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * PowerNV LPC bus handling. + * PowerNV SCOM bus debugfs interface * + * Copyright 2010 Benjamin Herrenschmidt, IBM Corp + * + * and David Gibson, IBM Corporation. * Copyright 2013 IBM Corp. */ @@ -10,62 +13,13 @@ #include #include #include +#include #include #include #include -#include - -/* - * We could probably fit that inside the scom_map_t - * which is a void* after all but it's really too ugly - * so let's kmalloc it for now - */ -struct opal_scom_map { - uint32_t chip; - uint64_t addr; -}; - -static scom_map_t opal_scom_map(struct device_node *dev, u64 reg, u64 count) -{ - struct opal_scom_map *m; - const __be32 *gcid; - - if (!of_get_property(dev, "scom-controller", NULL)) { - pr_err("%s: device %pOF is not a SCOM controller\n", - __func__, dev); - return SCOM_MAP_INVALID; - } - gcid = of_get_property(dev, "ibm,chip-id", NULL); - if (!gcid) { - pr_err("%s: device %pOF has no ibm,chip-id\n", - __func__, dev); - return SCOM_MAP_INVALID; - } - m = kmalloc(sizeof(*m), GFP_KERNEL); - if (!m) - return NULL; - m->chip = be32_to_cpup(gcid); - m->addr = reg; - - return (scom_map_t)m; -} - -static void opal_scom_unmap(scom_map_t map) -{ - kfree(map); -} - -static int opal_xscom_err_xlate(int64_t rc) -{ - switch(rc) { - case 0: - return 0; - /* Add more translations if necessary */ - default: - return -EIO; - } -} +#include +#include static u64 opal_scom_unmangle(u64 addr) { @@ -98,39 +52,154 @@ static u64 opal_scom_unmangle(u64 addr) return addr; } -static int opal_scom_read(scom_map_t map, u64 reg, u64 *value) +static int opal_scom_read(uint32_t chip, uint64_t addr, u64 reg, u64 *value) { - struct opal_scom_map *m = map; int64_t rc; __be64 v; - reg = opal_scom_unmangle(m->addr + reg); - rc = opal_xscom_read(m->chip, reg, (__be64 *)__pa(&v)); + reg = opal_scom_unmangle(addr + reg); + rc = opal_xscom_read(chip, reg, (__be64 *)__pa(&v)); + if (rc) { + *value = 0xfffffffffffffffful; + return -EIO; + } *value = be64_to_cpu(v); - return opal_xscom_err_xlate(rc); + return 0; } -static int opal_scom_write(scom_map_t map, u64 reg, u64 value) +static int opal_scom_write(uint32_t chip, uint64_t addr, u64 reg, u64 value) { - struct opal_scom_map *m = map; int64_t rc; - reg = opal_scom_unmangle(m->addr + reg); - rc = opal_xscom_write(m->chip, reg, value); - return opal_xscom_err_xlate(rc); + reg = opal_scom_unmangle(addr + reg); + rc = opal_xscom_write(chip, reg, value); + if (rc) + return -EIO; + return 0; +} + +struct scom_debug_entry { + u32 chip; + struct debugfs_blob_wrapper path; + char name[16]; +}; + +static ssize_t scom_debug_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct scom_debug_entry *ent = filp->private_data; + u64 __user *ubuf64 = (u64 __user *)ubuf; + loff_t off = *ppos; + ssize_t done = 0; + u64 reg, reg_base, reg_cnt, val; + int rc; + + if (off < 0 || (off & 7) || (count & 7)) + return -EINVAL; + reg_base = 
off >> 3; + reg_cnt = count >> 3; + + for (reg = 0; reg < reg_cnt; reg++) { + rc = opal_scom_read(ent->chip, reg_base, reg, &val); + if (!rc) + rc = put_user(val, ubuf64); + if (rc) { + if (!done) + done = rc; + break; + } + ubuf64++; + *ppos += 8; + done += 8; + } + return done; +} + +static ssize_t scom_debug_write(struct file *filp, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct scom_debug_entry *ent = filp->private_data; + u64 __user *ubuf64 = (u64 __user *)ubuf; + loff_t off = *ppos; + ssize_t done = 0; + u64 reg, reg_base, reg_cnt, val; + int rc; + + if (off < 0 || (off & 7) || (count & 7)) + return -EINVAL; + reg_base = off >> 3; + reg_cnt = count >> 3; + + for (reg = 0; reg < reg_cnt; reg++) { + rc = get_user(val, ubuf64); + if (!rc) + rc = opal_scom_write(ent->chip, reg_base, reg, val); + if (rc) { + if (!done) + done = rc; + break; + } + ubuf64++; + done += 8; + } + return done; } -static const struct scom_controller opal_scom_controller = { - .map = opal_scom_map, - .unmap = opal_scom_unmap, - .read = opal_scom_read, - .write = opal_scom_write +static const struct file_operations scom_debug_fops = { + .read = scom_debug_read, + .write = scom_debug_write, + .open = simple_open, + .llseek = default_llseek, }; -static int opal_xscom_init(void) +static int scom_debug_init_one(struct dentry *root, struct device_node *dn, + int chip) { - if (firmware_has_feature(FW_FEATURE_OPAL)) - scom_init(&opal_scom_controller); + struct scom_debug_entry *ent; + struct dentry *dir; + + ent = kzalloc(sizeof(*ent), GFP_KERNEL); + if (!ent) + return -ENOMEM; + + ent->chip = chip; + snprintf(ent->name, 16, "%08x", chip); + ent->path.data = (void *)kasprintf(GFP_KERNEL, "%pOF", dn); + ent->path.size = strlen((char *)ent->path.data); + + dir = debugfs_create_dir(ent->name, root); + if (!dir) { + kfree(ent->path.data); + kfree(ent); + return -1; + } + + debugfs_create_blob("devspec", 0400, dir, &ent->path); + debugfs_create_file("access", 0600, dir, ent, &scom_debug_fops); + return 0; } -machine_arch_initcall(powernv, opal_xscom_init); + +static int scom_debug_init(void) +{ + struct device_node *dn; + struct dentry *root; + int chip, rc; + + if (!firmware_has_feature(FW_FEATURE_OPAL)) + return 0; + + root = debugfs_create_dir("scom", powerpc_debugfs_root); + if (!root) + return -1; + + rc = 0; + for_each_node_with_property(dn, "scom-controller") { + chip = of_get_ibm_chip_id(dn); + WARN_ON(chip == -1); + rc |= scom_debug_init_one(root, dn, chip); + } + + return rc; +} +device_initcall(scom_debug_init); diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index aba443be7daafd..38e90270280bf6 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -58,6 +58,8 @@ static DEFINE_SPINLOCK(opal_write_lock); static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX]; static uint32_t opal_heartbeat; static struct task_struct *kopald_tsk; +static struct opal_msg *opal_msg; +static u32 opal_msg_size __ro_after_init; void opal_configure_cores(void) { @@ -271,14 +273,9 @@ static void opal_message_do_notify(uint32_t msg_type, void *msg) static void opal_handle_message(void) { s64 ret; - /* - * TODO: pre-allocate a message buffer depending on opal-msg-size - * value in /proc/device-tree. - */ - static struct opal_msg msg; u32 type; - ret = opal_get_msg(__pa(&msg), sizeof(msg)); + ret = opal_get_msg(__pa(opal_msg), opal_msg_size); /* No opal message pending. 
*/ if (ret == OPAL_RESOURCE) return; @@ -290,14 +287,14 @@ static void opal_handle_message(void) return; } - type = be32_to_cpu(msg.msg_type); + type = be32_to_cpu(opal_msg->msg_type); /* Sanity check */ if (type >= OPAL_MSG_TYPE_MAX) { pr_warn_once("%s: Unknown message type: %u\n", __func__, type); return; } - opal_message_do_notify(type, (void *)&msg); + opal_message_do_notify(type, (void *)opal_msg); } static irqreturn_t opal_message_notify(int irq, void *data) @@ -306,10 +303,24 @@ static irqreturn_t opal_message_notify(int irq, void *data) return IRQ_HANDLED; } -static int __init opal_message_init(void) +static int __init opal_message_init(struct device_node *opal_node) { int ret, i, irq; + ret = of_property_read_u32(opal_node, "opal-msg-size", &opal_msg_size); + if (ret) { + pr_notice("Failed to read opal-msg-size property\n"); + opal_msg_size = sizeof(struct opal_msg); + } + + opal_msg = kmalloc(opal_msg_size, GFP_KERNEL); + if (!opal_msg) { + opal_msg_size = sizeof(struct opal_msg); + /* Try to allocate fixed message size */ + opal_msg = kmalloc(opal_msg_size, GFP_KERNEL); + BUG_ON(opal_msg == NULL); + } + for (i = 0; i < OPAL_MSG_TYPE_MAX; i++) ATOMIC_INIT_NOTIFIER_HEAD(&opal_msg_notifier_head[i]); @@ -705,7 +716,10 @@ static ssize_t symbol_map_read(struct file *fp, struct kobject *kobj, bin_attr->size); } -static BIN_ATTR_RO(symbol_map, 0); +static struct bin_attribute symbol_map_attr = { + .attr = {.name = "symbol_map", .mode = 0400}, + .read = symbol_map_read +}; static void opal_export_symmap(void) { @@ -722,10 +736,10 @@ static void opal_export_symmap(void) return; /* Setup attributes */ - bin_attr_symbol_map.private = __va(be64_to_cpu(syms[0])); - bin_attr_symbol_map.size = be64_to_cpu(syms[1]); + symbol_map_attr.private = __va(be64_to_cpu(syms[0])); + symbol_map_attr.size = be64_to_cpu(syms[1]); - rc = sysfs_create_bin_file(opal_kobj, &bin_attr_symbol_map); + rc = sysfs_create_bin_file(opal_kobj, &symbol_map_attr); if (rc) pr_warn("Error %d creating OPAL symbols file\n", rc); } @@ -910,7 +924,7 @@ static int __init opal_init(void) } /* Initialise OPAL messaging system */ - opal_message_init(); + opal_message_init(opal_node); /* Initialise OPAL asynchronous completion interface */ opal_async_comp_init(); diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c index e28f03e1eb5eb5..a0b9c0c23ed278 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c +++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c @@ -36,7 +36,8 @@ static __be64 *pnv_alloc_tce_level(int nid, unsigned int shift) struct page *tce_mem = NULL; __be64 *addr; - tce_mem = alloc_pages_node(nid, GFP_KERNEL, shift - PAGE_SHIFT); + tce_mem = alloc_pages_node(nid, GFP_ATOMIC | __GFP_NOWARN, + shift - PAGE_SHIFT); if (!tce_mem) { pr_err("Failed to allocate a TCE memory, level shift=%d\n", shift); @@ -48,6 +49,9 @@ static __be64 *pnv_alloc_tce_level(int nid, unsigned int shift) return addr; } +static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr, + unsigned long size, unsigned int levels); + static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc) { __be64 *tmp = user ? 
tbl->it_userspace : (__be64 *) tbl->it_base; @@ -57,9 +61,9 @@ static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc) while (level) { int n = (idx & mask) >> (level * shift); - unsigned long tce; + unsigned long oldtce, tce = be64_to_cpu(READ_ONCE(tmp[n])); - if (tmp[n] == 0) { + if (!tce) { __be64 *tmp2; if (!alloc) @@ -70,10 +74,15 @@ static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc) if (!tmp2) return NULL; - tmp[n] = cpu_to_be64(__pa(tmp2) | - TCE_PCI_READ | TCE_PCI_WRITE); + tce = __pa(tmp2) | TCE_PCI_READ | TCE_PCI_WRITE; + oldtce = be64_to_cpu(cmpxchg(&tmp[n], 0, + cpu_to_be64(tce))); + if (oldtce) { + pnv_pci_ioda2_table_do_free_pages(tmp2, + ilog2(tbl->it_level_size) + 3, 1); + tce = oldtce; + } } - tce = be64_to_cpu(tmp[n]); tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE)); idx &= ~mask; @@ -161,6 +170,9 @@ void pnv_tce_free(struct iommu_table *tbl, long index, long npages) if (ptce) *ptce = cpu_to_be64(0); + else + /* Skip the rest of the level */ + i |= tbl->it_level_size - 1; } } @@ -260,7 +272,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, unsigned int table_shift = max_t(unsigned int, entries_shift + 3, PAGE_SHIFT); const unsigned long tce_table_size = 1UL << table_shift; - unsigned int tmplevels = levels; if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS)) return -EINVAL; @@ -268,9 +279,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, if (!is_power_of_2(window_size)) return -EINVAL; - if (alloc_userspace_copy && (window_size > (1ULL << 32))) - tmplevels = 1; - /* Adjust direct table size from window_size and levels */ entries_shift = (entries_shift + levels - 1) / levels; level_shift = entries_shift + 3; @@ -281,7 +289,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, /* Allocate TCE table */ addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift, - tmplevels, tce_table_size, &offset, &total_allocated); + 1, tce_table_size, &offset, &total_allocated); /* addr==NULL means that the first level allocation failed */ if (!addr) @@ -292,18 +300,18 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, * we did not allocate as much as we wanted, * release partially allocated table. 
*/ - if (tmplevels == levels && offset < tce_table_size) + if (levels == 1 && offset < tce_table_size) goto free_tces_exit; /* Allocate userspace view of the TCE table */ if (alloc_userspace_copy) { offset = 0; uas = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift, - tmplevels, tce_table_size, &offset, + 1, tce_table_size, &offset, &total_allocated_uas); if (!uas) goto free_tces_exit; - if (tmplevels == levels && (offset < tce_table_size || + if (levels == 1 && (offset < tce_table_size || total_allocated_uas != total_allocated)) goto free_uas_exit; } @@ -318,7 +326,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, pr_debug("Created TCE table: ws=%08llx ts=%lx @%08llx base=%lx uas=%p levels=%d/%d\n", window_size, tce_table_size, bus_offset, tbl->it_base, - tbl->it_userspace, tmplevels, levels); + tbl->it_userspace, 1, levels); return 0; diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index d8080558d0201d..c28d0d9b7ee0fc 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1939,26 +1939,12 @@ static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index, } #ifdef CONFIG_IOMMU_API -static int pnv_ioda1_tce_xchg(struct iommu_table *tbl, long index, - unsigned long *hpa, enum dma_data_direction *direction) +/* Common for IODA1 and IODA2 */ +static int pnv_ioda_tce_xchg_no_kill(struct iommu_table *tbl, long index, + unsigned long *hpa, enum dma_data_direction *direction, + bool realmode) { - long ret = pnv_tce_xchg(tbl, index, hpa, direction, true); - - if (!ret) - pnv_pci_p7ioc_tce_invalidate(tbl, index, 1, false); - - return ret; -} - -static int pnv_ioda1_tce_xchg_rm(struct iommu_table *tbl, long index, - unsigned long *hpa, enum dma_data_direction *direction) -{ - long ret = pnv_tce_xchg(tbl, index, hpa, direction, false); - - if (!ret) - pnv_pci_p7ioc_tce_invalidate(tbl, index, 1, true); - - return ret; + return pnv_tce_xchg(tbl, index, hpa, direction, !realmode); } #endif @@ -1973,8 +1959,8 @@ static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index, static struct iommu_table_ops pnv_ioda1_iommu_ops = { .set = pnv_ioda1_tce_build, #ifdef CONFIG_IOMMU_API - .exchange = pnv_ioda1_tce_xchg, - .exchange_rm = pnv_ioda1_tce_xchg_rm, + .xchg_no_kill = pnv_ioda_tce_xchg_no_kill, + .tce_kill = pnv_pci_p7ioc_tce_invalidate, .useraddrptr = pnv_tce_useraddrptr, #endif .clear = pnv_ioda1_tce_free, @@ -2103,30 +2089,6 @@ static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index, return ret; } -#ifdef CONFIG_IOMMU_API -static int pnv_ioda2_tce_xchg(struct iommu_table *tbl, long index, - unsigned long *hpa, enum dma_data_direction *direction) -{ - long ret = pnv_tce_xchg(tbl, index, hpa, direction, true); - - if (!ret) - pnv_pci_ioda2_tce_invalidate(tbl, index, 1, false); - - return ret; -} - -static int pnv_ioda2_tce_xchg_rm(struct iommu_table *tbl, long index, - unsigned long *hpa, enum dma_data_direction *direction) -{ - long ret = pnv_tce_xchg(tbl, index, hpa, direction, false); - - if (!ret) - pnv_pci_ioda2_tce_invalidate(tbl, index, 1, true); - - return ret; -} -#endif - static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index, long npages) { @@ -2138,8 +2100,8 @@ static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index, static struct iommu_table_ops pnv_ioda2_iommu_ops = { .set = pnv_ioda2_tce_build, #ifdef CONFIG_IOMMU_API - .exchange = pnv_ioda2_tce_xchg, - .exchange_rm = pnv_ioda2_tce_xchg_rm, + .xchg_no_kill = 
pnv_ioda_tce_xchg_no_kill, + .tce_kill = pnv_pci_ioda2_tce_invalidate, .useraddrptr = pnv_tce_useraddrptr, #endif .clear = pnv_ioda2_tce_free, @@ -2303,7 +2265,7 @@ static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb, tbl->it_ops = &pnv_ioda1_iommu_ops; pe->table_group.tce32_start = tbl->it_offset << tbl->it_page_shift; pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift; - iommu_init_table(tbl, phb->hose->node); + iommu_init_table(tbl, phb->hose->node, 0, 0); if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) pnv_ioda_setup_bus_dma(pe, pe->pbus); @@ -2420,6 +2382,7 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe) { struct iommu_table *tbl = NULL; long rc; + unsigned long res_start, res_end; /* * crashkernel= specifies the kdump kernel's maximum memory at @@ -2433,19 +2396,46 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe) * DMA window can be larger than available memory, which will * cause errors later. */ - const u64 window_size = min((u64)pe->table_group.tce32_size, max_memory); + const u64 maxblock = 1UL << (PAGE_SHIFT + MAX_ORDER - 1); + + /* + * We create the default window as big as we can. The constraint is + * the max order of allocation possible. The TCE table is likely to + * end up being multilevel and with on-demand allocation in place, + * the initial use is not going to be huge as the default window aims + * to support crippled devices (i.e. not fully 64bit DMAble) only. + */ + /* iommu_table::it_map uses 1 bit per IOMMU page, hence 8 */ + const u64 window_size = min((maxblock * 8) << PAGE_SHIFT, max_memory); + /* Each TCE level cannot exceed maxblock so go multilevel if needed */ + unsigned long tces_order = ilog2(window_size >> PAGE_SHIFT); + unsigned long tcelevel_order = ilog2(maxblock >> 3); + unsigned int levels = tces_order / tcelevel_order; + + if (tces_order % tcelevel_order) + levels += 1; + /* + * We try to stick to default levels (which is >1 at the moment) in + * order to save memory by relying on on-demain TCE level allocation. 
+ */ + levels = max_t(unsigned int, levels, POWERNV_IOMMU_DEFAULT_LEVELS); - rc = pnv_pci_ioda2_create_table(&pe->table_group, 0, - IOMMU_PAGE_SHIFT_4K, - window_size, - POWERNV_IOMMU_DEFAULT_LEVELS, false, &tbl); + rc = pnv_pci_ioda2_create_table(&pe->table_group, 0, PAGE_SHIFT, + window_size, levels, false, &tbl); if (rc) { pe_err(pe, "Failed to create 32-bit TCE table, err %ld", rc); return rc; } - iommu_init_table(tbl, pe->phb->hose->node); + /* We use top part of 32bit space for MMIO so exclude it from DMA */ + res_start = 0; + res_end = 0; + if (window_size > pe->phb->ioda.m32_pci_base) { + res_start = pe->phb->ioda.m32_pci_base >> tbl->it_page_shift; + res_end = min(window_size, SZ_4G) >> tbl->it_page_shift; + } + iommu_init_table(tbl, pe->phb->hose->node, res_start, res_end); rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl); if (rc) { diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 6104418c9ad5d3..2825d004dece27 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -54,7 +54,8 @@ int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id) break; } - if (!of_device_is_compatible(parent, "ibm,ioda2-phb")) { + if (!of_device_is_compatible(parent, "ibm,ioda2-phb") && + !of_device_is_compatible(parent, "ibm,ioda3-phb")) { of_node_put(parent); continue; } diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 469c2446324775..f914f0b14e4e3b 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -219,7 +219,7 @@ extern struct iommu_table_group *pnv_npu_compound_attach( struct pnv_ioda_pe *pe); /* pci-ioda-tce.c */ -#define POWERNV_IOMMU_DEFAULT_LEVELS 1 +#define POWERNV_IOMMU_DEFAULT_LEVELS 2 #define POWERNV_IOMMU_MAX_LEVELS 5 extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages, diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h index fd4a1c5a6369f4..1aa51c4fa90456 100644 --- a/arch/powerpc/platforms/powernv/powernv.h +++ b/arch/powerpc/platforms/powernv/powernv.h @@ -30,4 +30,9 @@ extern void opal_event_shutdown(void); bool cpu_core_split_required(void); +struct memcons; +ssize_t memcons_copy(struct memcons *mc, char *to, loff_t pos, size_t count); +u32 memcons_get_size(struct memcons *mc); +struct memcons *memcons_init(struct device_node *node, const char *mc_prop_name); + #endif /* _POWERNV_H */ diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index a5e52f9eed3cc9..83498604d322be 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -166,6 +167,14 @@ static void __init pnv_init(void) else #endif add_preferred_console("hvc", 0, NULL); + + if (!radix_enabled()) { + int i; + + /* Allocate per cpu area to save old slb contents during MCE */ + for_each_possible_cpu(i) + paca_ptrs[i]->mce_faulty_slbs = memblock_alloc_node(mmu_slb_size, __alignof__(*paca_ptrs[i]->mce_faulty_slbs), cpu_to_node(i)); + } } static void __init pnv_init_IRQ(void) diff --git a/arch/powerpc/platforms/powernv/ultravisor.c b/arch/powerpc/platforms/powernv/ultravisor.c new file mode 100644 index 00000000000000..e4a00ad06f9d3f --- /dev/null +++ b/arch/powerpc/platforms/powernv/ultravisor.c @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Ultravisor high level interfaces + * + * Copyright 2019, IBM 
Corporation. + * + */ +#include +#include +#include +#include + +#include +#include +#include + +#include "powernv.h" + +static struct kobject *ultravisor_kobj; + +int __init early_init_dt_scan_ultravisor(unsigned long node, const char *uname, + int depth, void *data) +{ + if (!of_flat_dt_is_compatible(node, "ibm,ultravisor")) + return 0; + + powerpc_firmware_features |= FW_FEATURE_ULTRAVISOR; + pr_debug("Ultravisor detected!\n"); + return 1; +} + +static struct memcons *uv_memcons; + +static ssize_t uv_msglog_read(struct file *file, struct kobject *kobj, + struct bin_attribute *bin_attr, char *to, + loff_t pos, size_t count) +{ + return memcons_copy(uv_memcons, to, pos, count); +} + +static struct bin_attribute uv_msglog_attr = { + .attr = {.name = "msglog", .mode = 0400}, + .read = uv_msglog_read +}; + +static int __init uv_init(void) +{ + struct device_node *node; + + if (!firmware_has_feature(FW_FEATURE_ULTRAVISOR)) + return 0; + + node = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware"); + if (!node) + return -ENODEV; + + uv_memcons = memcons_init(node, "memcons"); + if (!uv_memcons) + return -ENOENT; + + uv_msglog_attr.size = memcons_get_size(uv_memcons); + + ultravisor_kobj = kobject_create_and_add("ultravisor", firmware_kobj); + if (!ultravisor_kobj) + return -ENOMEM; + + return sysfs_create_bin_file(ultravisor_kobj, &uv_msglog_attr); +} +machine_subsys_initcall(powernv, uv_init); diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c index bdaeaecdc06b42..1193c294b8d0cd 100644 --- a/arch/powerpc/platforms/ps3/spu.c +++ b/arch/powerpc/platforms/ps3/spu.c @@ -184,10 +184,7 @@ static void spu_unmap(struct spu *spu) * setup_areas - Map the spu regions into the address space. * * The current HV requires the spu shadow regs to be mapped with the - * PTE page protection bits set as read-only (PP=3). This implementation - * uses the low level __ioremap() to bypass the page protection settings - * inforced by ioremap_prot() to get the needed PTE bits set for the - * shadow regs. + * PTE page protection bits set as read-only. */ static int __init setup_areas(struct spu *spu) @@ -195,9 +192,8 @@ static int __init setup_areas(struct spu *spu) struct table {char* name; unsigned long addr; unsigned long size;}; unsigned long shadow_flags = pgprot_val(pgprot_noncached_wc(PAGE_KERNEL_RO)); - spu_pdata(spu)->shadow = __ioremap(spu_pdata(spu)->shadow_addr, - sizeof(struct spe_shadow), - shadow_flags); + spu_pdata(spu)->shadow = ioremap_prot(spu_pdata(spu)->shadow_addr, + sizeof(struct spe_shadow), shadow_flags); if (!spu_pdata(spu)->shadow) { pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__); goto fail_ioremap; diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index f7b484f55553bc..9e35cddddf7307 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -145,3 +145,17 @@ config PAPR_SCM tristate "Support for the PAPR Storage Class Memory interface" help Enable access to hypervisor provided storage class memory. + +config PPC_SVM + bool "Secure virtual machine (SVM) support for POWER" + depends on PPC_PSERIES + select SWIOTLB + select ARCH_HAS_MEM_ENCRYPT + select ARCH_HAS_FORCE_DMA_UNENCRYPTED + help + There are certain POWER platforms which support secure guests using + the Protected Execution Facility, with the help of an Ultravisor + executing below the hypervisor layer. This enables support for + those guests. + + If unsure, say "N". 
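
For context on how PPC_SVM is consumed elsewhere in this series: the pseries IOMMU change further below leaves the PCI dma_map_ops unset for secure guests, so the generic DMA path falls back to SWIOTLB bounce buffering (consistent with the SWIOTLB and ARCH_HAS_FORCE_DMA_UNENCRYPTED selections above). That gating relies on is_secure_guest(); its definition is not shown in this excerpt, but a minimal sketch of such a helper, assuming the guest's secure state is reflected in the MSR S bit, would be:

#include <asm/reg.h>	/* mfmsr(), MSR_S -- assumed available */

#ifdef CONFIG_PPC_SVM
/* Sketch only: secure guests are assumed to run with MSR[S] set. */
static inline bool is_secure_guest(void)
{
	return mfmsr() & MSR_S;
}
#else
static inline bool is_secure_guest(void)
{
	return false;
}
#endif

Keeping the helper an inline bool lets callers such as iommu_init_early_pSeries() branch on secure-guest state without any #ifdefs at the call site.
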
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile index ab3d59aeaccaaa..a3c74a5cf20d74 100644 --- a/arch/powerpc/platforms/pseries/Makefile +++ b/arch/powerpc/platforms/pseries/Makefile @@ -26,6 +26,8 @@ obj-$(CONFIG_IBMVIO) += vio.o obj-$(CONFIG_IBMEBUS) += ibmebus.o obj-$(CONFIG_PAPR_SCM) += papr_scm.o obj-$(CONFIG_PPC_SPLPAR) += vphn.o +obj-$(CONFIG_PPC_SVM) += svm.o +obj-$(CONFIG_FA_DUMP) += rtas-fadump.o ifdef CONFIG_PPC_PSERIES obj-$(CONFIG_SUSPEND) += suspend.o diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index 9edae1863e2f1d..893ba3f562c4e7 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -42,42 +42,44 @@ static int ibm_get_config_addr_info; static int ibm_get_config_addr_info2; static int ibm_configure_pe; -#ifdef CONFIG_PCI_IOV void pseries_pcibios_bus_add_device(struct pci_dev *pdev) { struct pci_dn *pdn = pci_get_pdn(pdev); - struct pci_dn *physfn_pdn; - struct eeh_dev *edev; - if (!pdev->is_virtfn) + if (eeh_has_flag(EEH_FORCE_DISABLED)) return; - pdn->device_id = pdev->device; - pdn->vendor_id = pdev->vendor; - pdn->class_code = pdev->class; - /* - * Last allow unfreeze return code used for retrieval - * by user space in eeh-sysfs to show the last command - * completion from platform. - */ - pdn->last_allow_rc = 0; - physfn_pdn = pci_get_pdn(pdev->physfn); - pdn->pe_number = physfn_pdn->pe_num_map[pdn->vf_index]; - edev = pdn_to_eeh_dev(pdn); + dev_dbg(&pdev->dev, "EEH: Setting up device\n"); +#ifdef CONFIG_PCI_IOV + if (pdev->is_virtfn) { + struct pci_dn *physfn_pdn; - /* - * The following operations will fail if VF's sysfs files - * aren't created or its resources aren't finalized. - */ + pdn->device_id = pdev->device; + pdn->vendor_id = pdev->vendor; + pdn->class_code = pdev->class; + /* + * Last allow unfreeze return code used for retrieval + * by user space in eeh-sysfs to show the last command + * completion from platform. + */ + pdn->last_allow_rc = 0; + physfn_pdn = pci_get_pdn(pdev->physfn); + pdn->pe_number = physfn_pdn->pe_num_map[pdn->vf_index]; + } +#endif eeh_add_device_early(pdn); eeh_add_device_late(pdev); - edev->pe_config_addr = (pdn->busno << 16) | (pdn->devfn << 8); - eeh_rmv_from_parent_pe(edev); /* Remove as it is adding to bus pe */ - eeh_add_to_parent_pe(edev); /* Add as VF PE type */ - eeh_sysfs_add_device(pdev); +#ifdef CONFIG_PCI_IOV + if (pdev->is_virtfn) { + struct eeh_dev *edev = pdn_to_eeh_dev(pdn); -} + edev->pe_config_addr = (pdn->busno << 16) | (pdn->devfn << 8); + eeh_rmv_from_parent_pe(edev); /* Remove as it is adding to bus pe */ + eeh_add_to_parent_pe(edev); /* Add as VF PE type */ + } #endif + eeh_sysfs_add_device(pdev); +} /* * Buffer for reporting slot-error-detail rtas calls. Its here @@ -144,10 +146,8 @@ static int pseries_eeh_init(void) /* Set EEH probe mode */ eeh_add_flag(EEH_PROBE_MODE_DEVTREE | EEH_ENABLE_IO_FOR_LOG); -#ifdef CONFIG_PCI_IOV /* Set EEH machine dependent code */ ppc_md.pcibios_bus_add_device = pseries_pcibios_bus_add_device; -#endif return 0; } @@ -251,6 +251,8 @@ static void *pseries_eeh_probe(struct pci_dn *pdn, void *data) if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA) return NULL; + eeh_edev_dbg(edev, "Probing device\n"); + /* * Update class code and mode of eeh device. 
We need * correctly reflects that current device is root port @@ -280,8 +282,11 @@ static void *pseries_eeh_probe(struct pci_dn *pdn, void *data) pe.config_addr = (pdn->busno << 16) | (pdn->devfn << 8); /* Enable EEH on the device */ + eeh_edev_dbg(edev, "Enabling EEH on device\n"); ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE); - if (!ret) { + if (ret) { + eeh_edev_dbg(edev, "EEH failed to enable on device (code %d)\n", ret); + } else { /* Retrieve PE address */ edev->pe_config_addr = eeh_ops->get_pe_addr(&pe); pe.addr = edev->pe_config_addr; @@ -297,11 +302,6 @@ static void *pseries_eeh_probe(struct pci_dn *pdn, void *data) if (enable) { eeh_add_flag(EEH_ENABLED); eeh_add_to_parent_pe(edev); - - pr_debug("%s: EEH enabled on %02x:%02x.%01x PHB#%x-PE#%x\n", - __func__, pdn->busno, PCI_SLOT(pdn->devfn), - PCI_FUNC(pdn->devfn), pe.phb->global_number, - pe.addr); } else if (pdn->parent && pdn_to_eeh_dev(pdn->parent) && (pdn_to_eeh_dev(pdn->parent))->pe) { /* This device doesn't support EEH, but it may have an @@ -310,6 +310,8 @@ static void *pseries_eeh_probe(struct pci_dn *pdn, void *data) edev->pe_config_addr = pdn_to_eeh_dev(pdn->parent)->pe_config_addr; eeh_add_to_parent_pe(edev); } + eeh_edev_dbg(edev, "EEH is %s on device (code %d)\n", + (enable ? "enabled" : "unsupported"), ret); } /* Save memory bars */ diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 46d0d35b9ca449..8e700390f3d6c8 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -880,34 +880,44 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog) switch (hp_elog->action) { case PSERIES_HP_ELOG_ACTION_ADD: - if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) { + switch (hp_elog->id_type) { + case PSERIES_HP_ELOG_ID_DRC_COUNT: count = hp_elog->_drc_u.drc_count; rc = dlpar_memory_add_by_count(count); - } else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) { + break; + case PSERIES_HP_ELOG_ID_DRC_INDEX: drc_index = hp_elog->_drc_u.drc_index; rc = dlpar_memory_add_by_index(drc_index); - } else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_IC) { + break; + case PSERIES_HP_ELOG_ID_DRC_IC: count = hp_elog->_drc_u.ic.count; drc_index = hp_elog->_drc_u.ic.index; rc = dlpar_memory_add_by_ic(count, drc_index); - } else { + break; + default: rc = -EINVAL; + break; } break; case PSERIES_HP_ELOG_ACTION_REMOVE: - if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) { + switch (hp_elog->id_type) { + case PSERIES_HP_ELOG_ID_DRC_COUNT: count = hp_elog->_drc_u.drc_count; rc = dlpar_memory_remove_by_count(count); - } else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) { + break; + case PSERIES_HP_ELOG_ID_DRC_INDEX: drc_index = hp_elog->_drc_u.drc_index; rc = dlpar_memory_remove_by_index(drc_index); - } else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_IC) { + break; + case PSERIES_HP_ELOG_ID_DRC_IC: count = hp_elog->_drc_u.ic.count; drc_index = hp_elog->_drc_u.ic.index; rc = dlpar_memory_remove_by_ic(count, drc_index); - } else { + break; + default: rc = -EINVAL; + break; } break; diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 889dc2e44b89bb..6ba081dd61c935 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "pseries.h" @@ -609,7 +610,7 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus) iommu_table_setparms(pci->phb, dn, 
tbl); tbl->it_ops = &iommu_table_pseries_ops; - iommu_init_table(tbl, pci->phb->node); + iommu_init_table(tbl, pci->phb->node, 0, 0); /* Divide the rest (1.75GB) among the children */ pci->phb->dma_window_size = 0x80000000ul; @@ -621,7 +622,8 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus) #ifdef CONFIG_IOMMU_API static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned - long *tce, enum dma_data_direction *direction) + long *tce, enum dma_data_direction *direction, + bool realmode) { long rc; unsigned long ioba = (unsigned long) index << tbl->it_page_shift; @@ -649,7 +651,7 @@ static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned struct iommu_table_ops iommu_table_lpar_multi_ops = { .set = tce_buildmulti_pSeriesLP, #ifdef CONFIG_IOMMU_API - .exchange = tce_exchange_pseries, + .xchg_no_kill = tce_exchange_pseries, #endif .clear = tce_freemulti_pSeriesLP, .get = tce_get_pSeriesLP @@ -690,7 +692,7 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus) iommu_table_setparms_lpar(ppci->phb, pdn, tbl, ppci->table_group, dma_window); tbl->it_ops = &iommu_table_lpar_multi_ops; - iommu_init_table(tbl, ppci->phb->node); + iommu_init_table(tbl, ppci->phb->node, 0, 0); iommu_register_group(ppci->table_group, pci_domain_nr(bus), 0); pr_debug(" created table: %p\n", ppci->table_group); @@ -719,7 +721,7 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev) tbl = PCI_DN(dn)->table_group->tables[0]; iommu_table_setparms(phb, dn, tbl); tbl->it_ops = &iommu_table_pseries_ops; - iommu_init_table(tbl, phb->node); + iommu_init_table(tbl, phb->node, 0, 0); set_iommu_table_base(&dev->dev, tbl); return; } @@ -1169,7 +1171,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) iommu_table_setparms_lpar(pci->phb, pdn, tbl, pci->table_group, dma_window); tbl->it_ops = &iommu_table_lpar_multi_ops; - iommu_init_table(tbl, pci->phb->node); + iommu_init_table(tbl, pci->phb->node, 0, 0); iommu_register_group(pci->table_group, pci_domain_nr(pci->phb->bus), 0); pr_debug(" created table: %p\n", pci->table_group); @@ -1318,7 +1320,15 @@ void iommu_init_early_pSeries(void) of_reconfig_notifier_register(&iommu_reconfig_nb); register_memory_notifier(&iommu_mem_nb); - set_pci_dma_ops(&dma_iommu_ops); + /* + * Secure guest memory is inacessible to devices so regular DMA isn't + * possible. + * + * In that case keep devices' dma_map_ops as NULL so that the generic + * DMA code path will use SWIOTLB to bounce buffers for DMA. + */ + if (!is_secure_guest()) + set_pci_dma_ops(&dma_iommu_ops); } static int __init disable_multitce(char *str) diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 09bb878c21e0cf..36b846f6e74ee5 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -1413,7 +1413,10 @@ static int pseries_lpar_resize_hpt_commit(void *data) return 0; } -/* Must be called in user context */ +/* + * Must be called in process context. The caller must hold the + * cpus_lock. 
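+ * (The resize path uses stop_machine_cpuslocked(), which expects CPU
+ * hotplug to already be blocked by the caller.)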
+ */ static int pseries_lpar_resize_hpt(unsigned long shift) { struct hpt_resize_state state = { @@ -1467,7 +1470,8 @@ static int pseries_lpar_resize_hpt(unsigned long shift) t1 = ktime_get(); - rc = stop_machine(pseries_lpar_resize_hpt_commit, &state, NULL); + rc = stop_machine_cpuslocked(pseries_lpar_resize_hpt_commit, + &state, NULL); t2 = ktime_get(); @@ -1527,16 +1531,24 @@ void __init hpte_init_pseries(void) mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range; mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all; mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate; - register_process_table = pseries_lpar_register_process_table; if (firmware_has_feature(FW_FEATURE_HPT_RESIZE)) mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt; + + /* + * On POWER9, we need to do a H_REGISTER_PROC_TBL hcall + * to inform the hypervisor that we wish to use the HPT. + */ + if (cpu_has_feature(CPU_FTR_ARCH_300)) + pseries_lpar_register_process_table(0, 0, 0); } void radix_init_pseries(void) { pr_info("Using radix MMU under hypervisor\n"); - register_process_table = pseries_lpar_register_process_table; + + pseries_lpar_register_process_table(__pa(process_tb), + 0, PRTB_SIZE_SHIFT - 12); } #ifdef CONFIG_PPC_SMLPAR diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index fe812bebdf5e61..b571285f6c149e 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -207,7 +208,11 @@ static int update_dt_node(__be32 phandle, s32 scope) prop_data += vd; } + + cond_resched(); } + + cond_resched(); } while (rtas_rc == 1); of_node_put(dn); @@ -310,8 +315,12 @@ int pseries_devicetree_update(s32 scope) add_dt_node(phandle, drc_index); break; } + + cond_resched(); } } + + cond_resched(); } while (rc == 1); kfree(rtas_buf); diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index 1eae1d09980cf8..722830978639f0 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c @@ -229,8 +229,7 @@ void __init pSeries_final_fixup(void) pSeries_request_regions(); - eeh_probe_devices(); - eeh_addr_cache_build(); + eeh_show_enabled(); #ifdef CONFIG_PCI_IOV ppc_md.pcibios_sriov_enable = pseries_pcibios_sriov_enable; diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index f16fdd0f71f716..3acdcc3bb908c6 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -76,6 +76,7 @@ struct pseries_mc_errorlog { #define MC_ERROR_TYPE_UE 0x00 #define MC_ERROR_TYPE_SLB 0x01 #define MC_ERROR_TYPE_ERAT 0x02 +#define MC_ERROR_TYPE_UNKNOWN 0x03 #define MC_ERROR_TYPE_TLB 0x04 #define MC_ERROR_TYPE_D_CACHE 0x05 #define MC_ERROR_TYPE_I_CACHE 0x07 @@ -87,6 +88,9 @@ struct pseries_mc_errorlog { #define MC_ERROR_UE_LOAD_STORE 3 #define MC_ERROR_UE_PAGE_TABLE_WALK_LOAD_STORE 4 +#define UE_EFFECTIVE_ADDR_PROVIDED 0x40 +#define UE_LOGICAL_ADDR_PROVIDED 0x20 + #define MC_ERROR_SLB_PARITY 0 #define MC_ERROR_SLB_MULTIHIT 1 #define MC_ERROR_SLB_INDETERMINATE 2 @@ -113,27 +117,6 @@ static inline u8 rtas_mc_error_sub_type(const struct pseries_mc_errorlog *mlog) } } -static -inline u64 rtas_mc_get_effective_addr(const struct pseries_mc_errorlog *mlog) -{ - __be64 addr = 0; - - switch (mlog->error_type) { - case MC_ERROR_TYPE_UE: - if (mlog->sub_err_type & 0x40) - addr = mlog->effective_address; - break; - case 
MC_ERROR_TYPE_SLB: - case MC_ERROR_TYPE_ERAT: - case MC_ERROR_TYPE_TLB: - if (mlog->sub_err_type & 0x80) - addr = mlog->effective_address; - default: - break; - } - return be64_to_cpu(addr); -} - /* * Enable the hotplug interrupt late because processing them may touch other * devices or systems (e.g. hugepages) that have not been initialized at the @@ -511,160 +494,165 @@ int pSeries_system_reset_exception(struct pt_regs *regs) return 0; /* need to perform reset */ } -#define VAL_TO_STRING(ar, val) \ - (((val) < ARRAY_SIZE(ar)) ? ar[(val)] : "Unknown") -static void pseries_print_mce_info(struct pt_regs *regs, - struct rtas_error_log *errp) +static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp) { - const char *level, *sevstr; + struct mce_error_info mce_err = { 0 }; + unsigned long eaddr = 0, paddr = 0; struct pseries_errorlog *pseries_log; struct pseries_mc_errorlog *mce_log; - u8 error_type, err_sub_type; - u64 addr; - u8 initiator = rtas_error_initiator(errp); int disposition = rtas_error_disposition(errp); + int initiator = rtas_error_initiator(errp); + int severity = rtas_error_severity(errp); + u8 error_type, err_sub_type; - static const char * const initiators[] = { - [0] = "Unknown", - [1] = "CPU", - [2] = "PCI", - [3] = "ISA", - [4] = "Memory", - [5] = "Power Mgmt", - }; - static const char * const mc_err_types[] = { - [0] = "UE", - [1] = "SLB", - [2] = "ERAT", - [3] = "Unknown", - [4] = "TLB", - [5] = "D-Cache", - [6] = "Unknown", - [7] = "I-Cache", - }; - static const char * const mc_ue_types[] = { - [0] = "Indeterminate", - [1] = "Instruction fetch", - [2] = "Page table walk ifetch", - [3] = "Load/Store", - [4] = "Page table walk Load/Store", - }; - - /* SLB sub errors valid values are 0x0, 0x1, 0x2 */ - static const char * const mc_slb_types[] = { - [0] = "Parity", - [1] = "Multihit", - [2] = "Indeterminate", - }; - - /* TLB and ERAT sub errors valid values are 0x1, 0x2, 0x3 */ - static const char * const mc_soft_types[] = { - [0] = "Unknown", - [1] = "Parity", - [2] = "Multihit", - [3] = "Indeterminate", - }; - - if (!rtas_error_extended(errp)) { - pr_err("Machine check interrupt: Missing extended error log\n"); - return; - } + if (initiator == RTAS_INITIATOR_UNKNOWN) + mce_err.initiator = MCE_INITIATOR_UNKNOWN; + else if (initiator == RTAS_INITIATOR_CPU) + mce_err.initiator = MCE_INITIATOR_CPU; + else if (initiator == RTAS_INITIATOR_PCI) + mce_err.initiator = MCE_INITIATOR_PCI; + else if (initiator == RTAS_INITIATOR_ISA) + mce_err.initiator = MCE_INITIATOR_ISA; + else if (initiator == RTAS_INITIATOR_MEMORY) + mce_err.initiator = MCE_INITIATOR_MEMORY; + else if (initiator == RTAS_INITIATOR_POWERMGM) + mce_err.initiator = MCE_INITIATOR_POWERMGM; + else + mce_err.initiator = MCE_INITIATOR_UNKNOWN; + + if (severity == RTAS_SEVERITY_NO_ERROR) + mce_err.severity = MCE_SEV_NO_ERROR; + else if (severity == RTAS_SEVERITY_EVENT) + mce_err.severity = MCE_SEV_WARNING; + else if (severity == RTAS_SEVERITY_WARNING) + mce_err.severity = MCE_SEV_WARNING; + else if (severity == RTAS_SEVERITY_ERROR_SYNC) + mce_err.severity = MCE_SEV_SEVERE; + else if (severity == RTAS_SEVERITY_ERROR) + mce_err.severity = MCE_SEV_SEVERE; + else if (severity == RTAS_SEVERITY_FATAL) + mce_err.severity = MCE_SEV_FATAL; + else + mce_err.severity = MCE_SEV_FATAL; + + if (severity <= RTAS_SEVERITY_ERROR_SYNC) + mce_err.sync_error = true; + else + mce_err.sync_error = false; + + mce_err.error_type = MCE_ERROR_TYPE_UNKNOWN; + mce_err.error_class = MCE_ECLASS_UNKNOWN; + + if 
(!rtas_error_extended(errp)) + goto out; pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE); if (pseries_log == NULL) - return; + goto out; mce_log = (struct pseries_mc_errorlog *)pseries_log->data; - error_type = mce_log->error_type; err_sub_type = rtas_mc_error_sub_type(mce_log); - switch (rtas_error_severity(errp)) { - case RTAS_SEVERITY_NO_ERROR: - level = KERN_INFO; - sevstr = "Harmless"; - break; - case RTAS_SEVERITY_WARNING: - level = KERN_WARNING; - sevstr = ""; - break; - case RTAS_SEVERITY_ERROR: - case RTAS_SEVERITY_ERROR_SYNC: - level = KERN_ERR; - sevstr = "Severe"; - break; - case RTAS_SEVERITY_FATAL: - default: - level = KERN_ERR; - sevstr = "Fatal"; - break; - } + switch (mce_log->error_type) { + case MC_ERROR_TYPE_UE: + mce_err.error_type = MCE_ERROR_TYPE_UE; + switch (err_sub_type) { + case MC_ERROR_UE_IFETCH: + mce_err.u.ue_error_type = MCE_UE_ERROR_IFETCH; + break; + case MC_ERROR_UE_PAGE_TABLE_WALK_IFETCH: + mce_err.u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH; + break; + case MC_ERROR_UE_LOAD_STORE: + mce_err.u.ue_error_type = MCE_UE_ERROR_LOAD_STORE; + break; + case MC_ERROR_UE_PAGE_TABLE_WALK_LOAD_STORE: + mce_err.u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE; + break; + case MC_ERROR_UE_INDETERMINATE: + default: + mce_err.u.ue_error_type = MCE_UE_ERROR_INDETERMINATE; + break; + } + if (mce_log->sub_err_type & UE_EFFECTIVE_ADDR_PROVIDED) + eaddr = be64_to_cpu(mce_log->effective_address); -#ifdef CONFIG_PPC_BOOK3S_64 - /* Display faulty slb contents for SLB errors. */ - if (error_type == MC_ERROR_TYPE_SLB) - slb_dump_contents(local_paca->mce_faulty_slbs); -#endif + if (mce_log->sub_err_type & UE_LOGICAL_ADDR_PROVIDED) { + paddr = be64_to_cpu(mce_log->logical_address); + } else if (mce_log->sub_err_type & UE_EFFECTIVE_ADDR_PROVIDED) { + unsigned long pfn; - printk("%s%s Machine check interrupt [%s]\n", level, sevstr, - disposition == RTAS_DISP_FULLY_RECOVERED ? 
- "Recovered" : "Not recovered"); - if (user_mode(regs)) { - printk("%s NIP: [%016lx] PID: %d Comm: %s\n", level, - regs->nip, current->pid, current->comm); - } else { - printk("%s NIP [%016lx]: %pS\n", level, regs->nip, - (void *)regs->nip); - } - printk("%s Initiator: %s\n", level, - VAL_TO_STRING(initiators, initiator)); + pfn = addr_to_pfn(regs, eaddr); + if (pfn != ULONG_MAX) + paddr = pfn << PAGE_SHIFT; + } - switch (error_type) { - case MC_ERROR_TYPE_UE: - printk("%s Error type: %s [%s]\n", level, - VAL_TO_STRING(mc_err_types, error_type), - VAL_TO_STRING(mc_ue_types, err_sub_type)); break; case MC_ERROR_TYPE_SLB: - printk("%s Error type: %s [%s]\n", level, - VAL_TO_STRING(mc_err_types, error_type), - VAL_TO_STRING(mc_slb_types, err_sub_type)); + mce_err.error_type = MCE_ERROR_TYPE_SLB; + switch (err_sub_type) { + case MC_ERROR_SLB_PARITY: + mce_err.u.slb_error_type = MCE_SLB_ERROR_PARITY; + break; + case MC_ERROR_SLB_MULTIHIT: + mce_err.u.slb_error_type = MCE_SLB_ERROR_MULTIHIT; + break; + case MC_ERROR_SLB_INDETERMINATE: + default: + mce_err.u.slb_error_type = MCE_SLB_ERROR_INDETERMINATE; + break; + } + if (mce_log->sub_err_type & 0x80) + eaddr = be64_to_cpu(mce_log->effective_address); break; case MC_ERROR_TYPE_ERAT: + mce_err.error_type = MCE_ERROR_TYPE_ERAT; + switch (err_sub_type) { + case MC_ERROR_ERAT_PARITY: + mce_err.u.erat_error_type = MCE_ERAT_ERROR_PARITY; + break; + case MC_ERROR_ERAT_MULTIHIT: + mce_err.u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT; + break; + case MC_ERROR_ERAT_INDETERMINATE: + default: + mce_err.u.erat_error_type = MCE_ERAT_ERROR_INDETERMINATE; + break; + } + if (mce_log->sub_err_type & 0x80) + eaddr = be64_to_cpu(mce_log->effective_address); + break; case MC_ERROR_TYPE_TLB: - printk("%s Error type: %s [%s]\n", level, - VAL_TO_STRING(mc_err_types, error_type), - VAL_TO_STRING(mc_soft_types, err_sub_type)); + mce_err.error_type = MCE_ERROR_TYPE_TLB; + switch (err_sub_type) { + case MC_ERROR_TLB_PARITY: + mce_err.u.tlb_error_type = MCE_TLB_ERROR_PARITY; + break; + case MC_ERROR_TLB_MULTIHIT: + mce_err.u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT; + break; + case MC_ERROR_TLB_INDETERMINATE: + default: + mce_err.u.tlb_error_type = MCE_TLB_ERROR_INDETERMINATE; + break; + } + if (mce_log->sub_err_type & 0x80) + eaddr = be64_to_cpu(mce_log->effective_address); + break; + case MC_ERROR_TYPE_D_CACHE: + mce_err.error_type = MCE_ERROR_TYPE_DCACHE; break; + case MC_ERROR_TYPE_I_CACHE: + mce_err.error_type = MCE_ERROR_TYPE_DCACHE; + break; + case MC_ERROR_TYPE_UNKNOWN: default: - printk("%s Error type: %s\n", level, - VAL_TO_STRING(mc_err_types, error_type)); + mce_err.error_type = MCE_ERROR_TYPE_UNKNOWN; break; } - addr = rtas_mc_get_effective_addr(mce_log); - if (addr) - printk("%s Effective address: %016llx\n", level, addr); -} - -static int mce_handle_error(struct rtas_error_log *errp) -{ - struct pseries_errorlog *pseries_log; - struct pseries_mc_errorlog *mce_log; - int disposition = rtas_error_disposition(errp); - u8 error_type; - - if (!rtas_error_extended(errp)) - goto out; - - pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE); - if (pseries_log == NULL) - goto out; - - mce_log = (struct pseries_mc_errorlog *)pseries_log->data; - error_type = mce_log->error_type; - #ifdef CONFIG_PPC_BOOK3S_64 if (disposition == RTAS_DISP_NOT_RECOVERED) { switch (error_type) { @@ -682,98 +670,24 @@ static int mce_handle_error(struct rtas_error_log *errp) slb_save_contents(local_paca->mce_faulty_slbs); flush_and_reload_slb(); disposition = 
RTAS_DISP_FULLY_RECOVERED; - rtas_set_disposition_recovered(errp); break; default: break; } + } else if (disposition == RTAS_DISP_LIMITED_RECOVERY) { + /* Platform corrected itself but could be degraded */ + printk(KERN_ERR "MCE: limited recovery, system may " + "be degraded\n"); + disposition = RTAS_DISP_FULLY_RECOVERED; } #endif out: - return disposition; -} - -#ifdef CONFIG_MEMORY_FAILURE - -static DEFINE_PER_CPU(int, rtas_ue_count); -static DEFINE_PER_CPU(unsigned long, rtas_ue_paddr[MAX_MC_EVT]); + save_mce_event(regs, disposition == RTAS_DISP_FULLY_RECOVERED, + &mce_err, regs->nip, eaddr, paddr); -#define UE_EFFECTIVE_ADDR_PROVIDED 0x40 -#define UE_LOGICAL_ADDR_PROVIDED 0x20 - - -static void pseries_hwpoison_work_fn(struct work_struct *work) -{ - unsigned long paddr; - int index; - - while (__this_cpu_read(rtas_ue_count) > 0) { - index = __this_cpu_read(rtas_ue_count) - 1; - paddr = __this_cpu_read(rtas_ue_paddr[index]); - memory_failure(paddr >> PAGE_SHIFT, 0); - __this_cpu_dec(rtas_ue_count); - } -} - -static DECLARE_WORK(hwpoison_work, pseries_hwpoison_work_fn); - -static void queue_ue_paddr(unsigned long paddr) -{ - int index; - - index = __this_cpu_inc_return(rtas_ue_count) - 1; - if (index >= MAX_MC_EVT) { - __this_cpu_dec(rtas_ue_count); - return; - } - this_cpu_write(rtas_ue_paddr[index], paddr); - schedule_work(&hwpoison_work); -} - -static void pseries_do_memory_failure(struct pt_regs *regs, - struct pseries_mc_errorlog *mce_log) -{ - unsigned long paddr; - - if (mce_log->sub_err_type & UE_LOGICAL_ADDR_PROVIDED) { - paddr = be64_to_cpu(mce_log->logical_address); - } else if (mce_log->sub_err_type & UE_EFFECTIVE_ADDR_PROVIDED) { - unsigned long pfn; - - pfn = addr_to_pfn(regs, - be64_to_cpu(mce_log->effective_address)); - if (pfn == ULONG_MAX) - return; - paddr = pfn << PAGE_SHIFT; - } else { - return; - } - queue_ue_paddr(paddr); -} - -static void pseries_process_ue(struct pt_regs *regs, - struct rtas_error_log *errp) -{ - struct pseries_errorlog *pseries_log; - struct pseries_mc_errorlog *mce_log; - - if (!rtas_error_extended(errp)) - return; - - pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE); - if (!pseries_log) - return; - - mce_log = (struct pseries_mc_errorlog *)pseries_log->data; - - if (mce_log->error_type == MC_ERROR_TYPE_UE) - pseries_do_memory_failure(regs, mce_log); + return disposition; } -#else -static inline void pseries_process_ue(struct pt_regs *regs, - struct rtas_error_log *errp) { } -#endif /*CONFIG_MEMORY_FAILURE */ /* * Process MCE rtas errlog event. @@ -795,49 +709,51 @@ static void mce_process_errlog_event(struct irq_work *work) * Return 1 if corrected (or delivered a signal). * Return 0 if there is nothing we can do. 
*/ -static int recover_mce(struct pt_regs *regs, struct rtas_error_log *err) +static int recover_mce(struct pt_regs *regs, struct machine_check_event *evt) { int recovered = 0; - int disposition = rtas_error_disposition(err); - - pseries_print_mce_info(regs, err); if (!(regs->msr & MSR_RI)) { /* If MSR_RI isn't set, we cannot recover */ pr_err("Machine check interrupt unrecoverable: MSR(RI=0)\n"); recovered = 0; - - } else if (disposition == RTAS_DISP_FULLY_RECOVERED) { + } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) { /* Platform corrected itself */ recovered = 1; + } else if (evt->severity == MCE_SEV_FATAL) { + /* Fatal machine check */ + pr_err("Machine check interrupt is fatal\n"); + recovered = 0; + } - } else if (disposition == RTAS_DISP_LIMITED_RECOVERY) { - /* Platform corrected itself but could be degraded */ - printk(KERN_ERR "MCE: limited recovery, system may " - "be degraded\n"); - recovered = 1; - - } else if (user_mode(regs) && !is_global_init(current) && - rtas_error_severity(err) == RTAS_SEVERITY_ERROR_SYNC) { - + if (!recovered && evt->sync_error) { /* - * If we received a synchronous error when in userspace - * kill the task. Firmware may report details of the fail - * asynchronously, so we can't rely on the target and type - * fields being valid here. + * Try to kill processes if we get a synchronous machine check + * (e.g., one caused by execution of this instruction). This + * will devolve into a panic if we try to kill init or are in + * an interrupt etc. + * + * TODO: Queue up this address for hwpoisioning later. + * TODO: This is not quite right for d-side machine + * checks ->nip is not necessarily the important + * address. */ - printk(KERN_ERR "MCE: uncorrectable error, killing task " - "%s:%d\n", current->comm, current->pid); - - _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip); - recovered = 1; + if ((user_mode(regs))) { + _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip); + recovered = 1; + } else if (die_will_crash()) { + /* + * die() would kill the kernel, so better to go via + * the platform reboot code that will log the + * machine check. + */ + recovered = 0; + } else { + die("Machine check", regs, SIGBUS); + recovered = 1; + } } - pseries_process_ue(regs, err); - - /* Queue irq work to log this rtas event later. */ - irq_work_queue(&mce_errlog_process_work); - return recovered; } @@ -853,14 +769,21 @@ static int recover_mce(struct pt_regs *regs, struct rtas_error_log *err) */ int pSeries_machine_check_exception(struct pt_regs *regs) { - struct rtas_error_log *errp; + struct machine_check_event evt; - if (fwnmi_active) { - fwnmi_release_errinfo(); - errp = fwnmi_get_errlog(); - if (errp && recover_mce(regs, errp)) - return 1; + if (!get_mce_event(&evt, MCE_EVENT_RELEASE)) + return 0; + + /* Print things out */ + if (evt.version != MCE_V1) { + pr_err("Machine Check Exception, Unknown event version %d !\n", + evt.version); + return 0; } + machine_check_print_event_info(&evt, user_mode(regs), false); + + if (recover_mce(regs, &evt)) + return 1; return 0; } @@ -877,7 +800,12 @@ long pseries_machine_check_realmode(struct pt_regs *regs) * to panic. Hence we will call it as soon as we go into * virtual mode. */ - disposition = mce_handle_error(errp); + disposition = mce_handle_error(regs, errp); + fwnmi_release_errinfo(); + + /* Queue irq work to log this rtas event later. 
*/ + irq_work_queue(&mce_errlog_process_work); + if (disposition == RTAS_DISP_FULLY_RECOVERED) return 1; } diff --git a/arch/powerpc/platforms/pseries/rtas-fadump.c b/arch/powerpc/platforms/pseries/rtas-fadump.c new file mode 100644 index 00000000000000..70c3013fdd074f --- /dev/null +++ b/arch/powerpc/platforms/pseries/rtas-fadump.c @@ -0,0 +1,550 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Firmware-Assisted Dump support on POWERVM platform. + * + * Copyright 2011, Mahesh Salgaonkar, IBM Corporation. + * Copyright 2019, Hari Bathini, IBM Corporation. + */ + +#define pr_fmt(fmt) "rtas fadump: " fmt + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "rtas-fadump.h" + +static struct rtas_fadump_mem_struct fdm; +static const struct rtas_fadump_mem_struct *fdm_active; + +static void rtas_fadump_update_config(struct fw_dump *fadump_conf, + const struct rtas_fadump_mem_struct *fdm) +{ + fadump_conf->boot_mem_dest_addr = + be64_to_cpu(fdm->rmr_region.destination_address); + + fadump_conf->fadumphdr_addr = (fadump_conf->boot_mem_dest_addr + + fadump_conf->boot_memory_size); +} + +/* + * This function is called in the capture kernel to get configuration details + * setup in the first kernel and passed to the f/w. + */ +static void rtas_fadump_get_config(struct fw_dump *fadump_conf, + const struct rtas_fadump_mem_struct *fdm) +{ + fadump_conf->boot_mem_addr[0] = + be64_to_cpu(fdm->rmr_region.source_address); + fadump_conf->boot_mem_sz[0] = be64_to_cpu(fdm->rmr_region.source_len); + fadump_conf->boot_memory_size = fadump_conf->boot_mem_sz[0]; + + fadump_conf->boot_mem_top = fadump_conf->boot_memory_size; + fadump_conf->boot_mem_regs_cnt = 1; + + /* + * Start address of reserve dump area (permanent reservation) for + * re-registering FADump after dump capture. + */ + fadump_conf->reserve_dump_area_start = + be64_to_cpu(fdm->cpu_state_data.destination_address); + + rtas_fadump_update_config(fadump_conf, fdm); +} + +static u64 rtas_fadump_init_mem_struct(struct fw_dump *fadump_conf) +{ + u64 addr = fadump_conf->reserve_dump_area_start; + + memset(&fdm, 0, sizeof(struct rtas_fadump_mem_struct)); + addr = addr & PAGE_MASK; + + fdm.header.dump_format_version = cpu_to_be32(0x00000001); + fdm.header.dump_num_sections = cpu_to_be16(3); + fdm.header.dump_status_flag = 0; + fdm.header.offset_first_dump_section = + cpu_to_be32((u32)offsetof(struct rtas_fadump_mem_struct, + cpu_state_data)); + + /* + * Fields for disk dump option. + * We are not using disk dump option, hence set these fields to 0. + */ + fdm.header.dd_block_size = 0; + fdm.header.dd_block_offset = 0; + fdm.header.dd_num_blocks = 0; + fdm.header.dd_offset_disk_path = 0; + + /* set 0 to disable an automatic dump-reboot. */ + fdm.header.max_time_auto = 0; + + /* Kernel dump sections */ + /* cpu state data section. 
*/ + fdm.cpu_state_data.request_flag = + cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG); + fdm.cpu_state_data.source_data_type = + cpu_to_be16(RTAS_FADUMP_CPU_STATE_DATA); + fdm.cpu_state_data.source_address = 0; + fdm.cpu_state_data.source_len = + cpu_to_be64(fadump_conf->cpu_state_data_size); + fdm.cpu_state_data.destination_address = cpu_to_be64(addr); + addr += fadump_conf->cpu_state_data_size; + + /* hpte region section */ + fdm.hpte_region.request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG); + fdm.hpte_region.source_data_type = + cpu_to_be16(RTAS_FADUMP_HPTE_REGION); + fdm.hpte_region.source_address = 0; + fdm.hpte_region.source_len = + cpu_to_be64(fadump_conf->hpte_region_size); + fdm.hpte_region.destination_address = cpu_to_be64(addr); + addr += fadump_conf->hpte_region_size; + + /* RMA region section */ + fdm.rmr_region.request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG); + fdm.rmr_region.source_data_type = + cpu_to_be16(RTAS_FADUMP_REAL_MODE_REGION); + fdm.rmr_region.source_address = cpu_to_be64(0); + fdm.rmr_region.source_len = cpu_to_be64(fadump_conf->boot_memory_size); + fdm.rmr_region.destination_address = cpu_to_be64(addr); + addr += fadump_conf->boot_memory_size; + + rtas_fadump_update_config(fadump_conf, &fdm); + + return addr; +} + +static u64 rtas_fadump_get_bootmem_min(void) +{ + return RTAS_FADUMP_MIN_BOOT_MEM; +} + +static int rtas_fadump_register(struct fw_dump *fadump_conf) +{ + unsigned int wait_time; + int rc, err = -EIO; + + /* TODO: Add upper time limit for the delay */ + do { + rc = rtas_call(fadump_conf->ibm_configure_kernel_dump, 3, 1, + NULL, FADUMP_REGISTER, &fdm, + sizeof(struct rtas_fadump_mem_struct)); + + wait_time = rtas_busy_delay_time(rc); + if (wait_time) + mdelay(wait_time); + + } while (wait_time); + + switch (rc) { + case 0: + pr_info("Registration is successful!\n"); + fadump_conf->dump_registered = 1; + err = 0; + break; + case -1: + pr_err("Failed to register. Hardware Error(%d).\n", rc); + break; + case -3: + if (!is_fadump_boot_mem_contiguous()) + pr_err("Can't have holes in boot memory area.\n"); + else if (!is_fadump_reserved_mem_contiguous()) + pr_err("Can't have holes in reserved memory area.\n"); + + pr_err("Failed to register. Parameter Error(%d).\n", rc); + err = -EINVAL; + break; + case -9: + pr_err("Already registered!\n"); + fadump_conf->dump_registered = 1; + err = -EEXIST; + break; + default: + pr_err("Failed to register. 
Unknown Error(%d).\n", rc); + break; + } + + return err; +} + +static int rtas_fadump_unregister(struct fw_dump *fadump_conf) +{ + unsigned int wait_time; + int rc; + + /* TODO: Add upper time limit for the delay */ + do { + rc = rtas_call(fadump_conf->ibm_configure_kernel_dump, 3, 1, + NULL, FADUMP_UNREGISTER, &fdm, + sizeof(struct rtas_fadump_mem_struct)); + + wait_time = rtas_busy_delay_time(rc); + if (wait_time) + mdelay(wait_time); + } while (wait_time); + + if (rc) { + pr_err("Failed to un-register - unexpected error(%d).\n", rc); + return -EIO; + } + + fadump_conf->dump_registered = 0; + return 0; +} + +static int rtas_fadump_invalidate(struct fw_dump *fadump_conf) +{ + unsigned int wait_time; + int rc; + + /* TODO: Add upper time limit for the delay */ + do { + rc = rtas_call(fadump_conf->ibm_configure_kernel_dump, 3, 1, + NULL, FADUMP_INVALIDATE, fdm_active, + sizeof(struct rtas_fadump_mem_struct)); + + wait_time = rtas_busy_delay_time(rc); + if (wait_time) + mdelay(wait_time); + } while (wait_time); + + if (rc) { + pr_err("Failed to invalidate - unexpected error (%d).\n", rc); + return -EIO; + } + + fadump_conf->dump_active = 0; + fdm_active = NULL; + return 0; +} + +#define RTAS_FADUMP_GPR_MASK 0xffffff0000000000 +static inline int rtas_fadump_gpr_index(u64 id) +{ + char str[3]; + int i = -1; + + if ((id & RTAS_FADUMP_GPR_MASK) == fadump_str_to_u64("GPR")) { + /* get the digits at the end */ + id &= ~RTAS_FADUMP_GPR_MASK; + id >>= 24; + str[2] = '\0'; + str[1] = id & 0xff; + str[0] = (id >> 8) & 0xff; + if (kstrtoint(str, 10, &i)) + i = -EINVAL; + if (i > 31) + i = -1; + } + return i; +} + +void rtas_fadump_set_regval(struct pt_regs *regs, u64 reg_id, u64 reg_val) +{ + int i; + + i = rtas_fadump_gpr_index(reg_id); + if (i >= 0) + regs->gpr[i] = (unsigned long)reg_val; + else if (reg_id == fadump_str_to_u64("NIA")) + regs->nip = (unsigned long)reg_val; + else if (reg_id == fadump_str_to_u64("MSR")) + regs->msr = (unsigned long)reg_val; + else if (reg_id == fadump_str_to_u64("CTR")) + regs->ctr = (unsigned long)reg_val; + else if (reg_id == fadump_str_to_u64("LR")) + regs->link = (unsigned long)reg_val; + else if (reg_id == fadump_str_to_u64("XER")) + regs->xer = (unsigned long)reg_val; + else if (reg_id == fadump_str_to_u64("CR")) + regs->ccr = (unsigned long)reg_val; + else if (reg_id == fadump_str_to_u64("DAR")) + regs->dar = (unsigned long)reg_val; + else if (reg_id == fadump_str_to_u64("DSISR")) + regs->dsisr = (unsigned long)reg_val; +} + +static struct rtas_fadump_reg_entry* +rtas_fadump_read_regs(struct rtas_fadump_reg_entry *reg_entry, + struct pt_regs *regs) +{ + memset(regs, 0, sizeof(struct pt_regs)); + + while (be64_to_cpu(reg_entry->reg_id) != fadump_str_to_u64("CPUEND")) { + rtas_fadump_set_regval(regs, be64_to_cpu(reg_entry->reg_id), + be64_to_cpu(reg_entry->reg_value)); + reg_entry++; + } + reg_entry++; + return reg_entry; +} + +/* + * Read CPU state dump data and convert it into ELF notes. + * The CPU dump starts with magic number "REGSAVE". NumCpusOffset should be + * used to access the data to allow for additional fields to be added without + * affecting compatibility. Each list of registers for a CPU starts with + * "CPUSTRT" and ends with "CPUEND". Each register entry is of 16 bytes, + * 8 Byte ASCII identifier and 8 Byte register value. The register entry + * with identifier "CPUSTRT" and "CPUEND" contains 4 byte cpu id as part + * of register value. For more details refer to PAPR document. 
+ * + * Only for the crashing cpu we ignore the CPU dump data and get exact + * state from fadump crash info structure populated by first kernel at the + * time of crash. + */ +static int __init rtas_fadump_build_cpu_notes(struct fw_dump *fadump_conf) +{ + struct rtas_fadump_reg_save_area_header *reg_header; + struct fadump_crash_info_header *fdh = NULL; + struct rtas_fadump_reg_entry *reg_entry; + u32 num_cpus, *note_buf; + int i, rc = 0, cpu = 0; + struct pt_regs regs; + unsigned long addr; + void *vaddr; + + addr = be64_to_cpu(fdm_active->cpu_state_data.destination_address); + vaddr = __va(addr); + + reg_header = vaddr; + if (be64_to_cpu(reg_header->magic_number) != + fadump_str_to_u64("REGSAVE")) { + pr_err("Unable to read register save area.\n"); + return -ENOENT; + } + + pr_debug("--------CPU State Data------------\n"); + pr_debug("Magic Number: %llx\n", be64_to_cpu(reg_header->magic_number)); + pr_debug("NumCpuOffset: %x\n", be32_to_cpu(reg_header->num_cpu_offset)); + + vaddr += be32_to_cpu(reg_header->num_cpu_offset); + num_cpus = be32_to_cpu(*((__be32 *)(vaddr))); + pr_debug("NumCpus : %u\n", num_cpus); + vaddr += sizeof(u32); + reg_entry = (struct rtas_fadump_reg_entry *)vaddr; + + rc = fadump_setup_cpu_notes_buf(num_cpus); + if (rc != 0) + return rc; + + note_buf = (u32 *)fadump_conf->cpu_notes_buf_vaddr; + + if (fadump_conf->fadumphdr_addr) + fdh = __va(fadump_conf->fadumphdr_addr); + + for (i = 0; i < num_cpus; i++) { + if (be64_to_cpu(reg_entry->reg_id) != + fadump_str_to_u64("CPUSTRT")) { + pr_err("Unable to read CPU state data\n"); + rc = -ENOENT; + goto error_out; + } + /* Lower 4 bytes of reg_value contains logical cpu id */ + cpu = (be64_to_cpu(reg_entry->reg_value) & + RTAS_FADUMP_CPU_ID_MASK); + if (fdh && !cpumask_test_cpu(cpu, &fdh->online_mask)) { + RTAS_FADUMP_SKIP_TO_NEXT_CPU(reg_entry); + continue; + } + pr_debug("Reading register data for cpu %d...\n", cpu); + if (fdh && fdh->crashing_cpu == cpu) { + regs = fdh->regs; + note_buf = fadump_regs_to_elf_notes(note_buf, ®s); + RTAS_FADUMP_SKIP_TO_NEXT_CPU(reg_entry); + } else { + reg_entry++; + reg_entry = rtas_fadump_read_regs(reg_entry, ®s); + note_buf = fadump_regs_to_elf_notes(note_buf, ®s); + } + } + final_note(note_buf); + + if (fdh) { + pr_debug("Updating elfcore header (%llx) with cpu notes\n", + fdh->elfcorehdr_addr); + fadump_update_elfcore_header(__va(fdh->elfcorehdr_addr)); + } + return 0; + +error_out: + fadump_free_cpu_notes_buf(); + return rc; + +} + +/* + * Validate and process the dump data stored by firmware before exporting + * it through '/proc/vmcore'. + */ +static int __init rtas_fadump_process(struct fw_dump *fadump_conf) +{ + struct fadump_crash_info_header *fdh; + int rc = 0; + + if (!fdm_active || !fadump_conf->fadumphdr_addr) + return -EINVAL; + + /* Check if the dump data is valid. 
*/ + if ((be16_to_cpu(fdm_active->header.dump_status_flag) == + RTAS_FADUMP_ERROR_FLAG) || + (fdm_active->cpu_state_data.error_flags != 0) || + (fdm_active->rmr_region.error_flags != 0)) { + pr_err("Dump taken by platform is not valid\n"); + return -EINVAL; + } + if ((fdm_active->rmr_region.bytes_dumped != + fdm_active->rmr_region.source_len) || + !fdm_active->cpu_state_data.bytes_dumped) { + pr_err("Dump taken by platform is incomplete\n"); + return -EINVAL; + } + + /* Validate the fadump crash info header */ + fdh = __va(fadump_conf->fadumphdr_addr); + if (fdh->magic_number != FADUMP_CRASH_INFO_MAGIC) { + pr_err("Crash info header is not valid.\n"); + return -EINVAL; + } + + rc = rtas_fadump_build_cpu_notes(fadump_conf); + if (rc) + return rc; + + /* + * We are done validating dump info and elfcore header is now ready + * to be exported. set elfcorehdr_addr so that vmcore module will + * export the elfcore header through '/proc/vmcore'. + */ + elfcorehdr_addr = fdh->elfcorehdr_addr; + + return 0; +} + +static void rtas_fadump_region_show(struct fw_dump *fadump_conf, + struct seq_file *m) +{ + const struct rtas_fadump_section *cpu_data_section; + const struct rtas_fadump_mem_struct *fdm_ptr; + + if (fdm_active) + fdm_ptr = fdm_active; + else + fdm_ptr = &fdm; + + cpu_data_section = &(fdm_ptr->cpu_state_data); + seq_printf(m, "CPU :[%#016llx-%#016llx] %#llx bytes, Dumped: %#llx\n", + be64_to_cpu(cpu_data_section->destination_address), + be64_to_cpu(cpu_data_section->destination_address) + + be64_to_cpu(cpu_data_section->source_len) - 1, + be64_to_cpu(cpu_data_section->source_len), + be64_to_cpu(cpu_data_section->bytes_dumped)); + + seq_printf(m, "HPTE:[%#016llx-%#016llx] %#llx bytes, Dumped: %#llx\n", + be64_to_cpu(fdm_ptr->hpte_region.destination_address), + be64_to_cpu(fdm_ptr->hpte_region.destination_address) + + be64_to_cpu(fdm_ptr->hpte_region.source_len) - 1, + be64_to_cpu(fdm_ptr->hpte_region.source_len), + be64_to_cpu(fdm_ptr->hpte_region.bytes_dumped)); + + seq_printf(m, "DUMP: Src: %#016llx, Dest: %#016llx, ", + be64_to_cpu(fdm_ptr->rmr_region.source_address), + be64_to_cpu(fdm_ptr->rmr_region.destination_address)); + seq_printf(m, "Size: %#llx, Dumped: %#llx bytes\n", + be64_to_cpu(fdm_ptr->rmr_region.source_len), + be64_to_cpu(fdm_ptr->rmr_region.bytes_dumped)); + + /* Dump is active. Show reserved area start address. */ + if (fdm_active) { + seq_printf(m, "\nMemory above %#016lx is reserved for saving crash dump\n", + fadump_conf->reserve_dump_area_start); + } +} + +static void rtas_fadump_trigger(struct fadump_crash_info_header *fdh, + const char *msg) +{ + /* Call ibm,os-term rtas call to trigger firmware assisted dump */ + rtas_os_term((char *)msg); +} + +static struct fadump_ops rtas_fadump_ops = { + .fadump_init_mem_struct = rtas_fadump_init_mem_struct, + .fadump_get_bootmem_min = rtas_fadump_get_bootmem_min, + .fadump_register = rtas_fadump_register, + .fadump_unregister = rtas_fadump_unregister, + .fadump_invalidate = rtas_fadump_invalidate, + .fadump_process = rtas_fadump_process, + .fadump_region_show = rtas_fadump_region_show, + .fadump_trigger = rtas_fadump_trigger, +}; + +void __init rtas_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) +{ + int i, size, num_sections; + const __be32 *sections; + const __be32 *token; + + /* + * Check if Firmware Assisted dump is supported. if yes, check + * if dump has been initiated on last reboot. 
+ */ + token = of_get_flat_dt_prop(node, "ibm,configure-kernel-dump", NULL); + if (!token) + return; + + fadump_conf->ibm_configure_kernel_dump = be32_to_cpu(*token); + fadump_conf->ops = &rtas_fadump_ops; + fadump_conf->fadump_supported = 1; + + /* Firmware supports 64-bit value for size, align it to pagesize. */ + fadump_conf->max_copy_size = _ALIGN_DOWN(U64_MAX, PAGE_SIZE); + + /* + * The 'ibm,kernel-dump' rtas node is present only if there is + * dump data waiting for us. + */ + fdm_active = of_get_flat_dt_prop(node, "ibm,kernel-dump", NULL); + if (fdm_active) { + pr_info("Firmware-assisted dump is active.\n"); + fadump_conf->dump_active = 1; + rtas_fadump_get_config(fadump_conf, (void *)__pa(fdm_active)); + } + + /* Get the sizes required to store dump data for the firmware provided + * dump sections. + * For each dump section type supported, a 32bit cell which defines + * the ID of a supported section followed by two 32 bit cells which + * gives the size of the section in bytes. + */ + sections = of_get_flat_dt_prop(node, "ibm,configure-kernel-dump-sizes", + &size); + + if (!sections) + return; + + num_sections = size / (3 * sizeof(u32)); + + for (i = 0; i < num_sections; i++, sections += 3) { + u32 type = (u32)of_read_number(sections, 1); + + switch (type) { + case RTAS_FADUMP_CPU_STATE_DATA: + fadump_conf->cpu_state_data_size = + of_read_ulong(§ions[1], 2); + break; + case RTAS_FADUMP_HPTE_REGION: + fadump_conf->hpte_region_size = + of_read_ulong(§ions[1], 2); + break; + } + } +} diff --git a/arch/powerpc/platforms/pseries/rtas-fadump.h b/arch/powerpc/platforms/pseries/rtas-fadump.h new file mode 100644 index 00000000000000..fd59bd7ca9c30d --- /dev/null +++ b/arch/powerpc/platforms/pseries/rtas-fadump.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Firmware-Assisted Dump support on POWERVM platform. + * + * Copyright 2011, Mahesh Salgaonkar, IBM Corporation. + * Copyright 2019, Hari Bathini, IBM Corporation. + */ + +#ifndef _PSERIES_RTAS_FADUMP_H +#define _PSERIES_RTAS_FADUMP_H + +/* + * On some Power systems where RMO is 128MB, it still requires minimum of + * 256MB for kernel to boot successfully. When kdump infrastructure is + * configured to save vmcore over network, we run into OOM issue while + * loading modules related to network setup. Hence we need additional 64M + * of memory to avoid OOM issue. + */ +#define RTAS_FADUMP_MIN_BOOT_MEM ((0x1UL << 28) + (0x1UL << 26)) + +/* Firmware provided dump sections */ +#define RTAS_FADUMP_CPU_STATE_DATA 0x0001 +#define RTAS_FADUMP_HPTE_REGION 0x0002 +#define RTAS_FADUMP_REAL_MODE_REGION 0x0011 + +/* Dump request flag */ +#define RTAS_FADUMP_REQUEST_FLAG 0x00000001 + +/* Dump status flag */ +#define RTAS_FADUMP_ERROR_FLAG 0x2000 + +/* Kernel Dump section info */ +struct rtas_fadump_section { + __be32 request_flag; + __be16 source_data_type; + __be16 error_flags; + __be64 source_address; + __be64 source_len; + __be64 bytes_dumped; + __be64 destination_address; +}; + +/* ibm,configure-kernel-dump header. */ +struct rtas_fadump_section_header { + __be32 dump_format_version; + __be16 dump_num_sections; + __be16 dump_status_flag; + __be32 offset_first_dump_section; + + /* Fields for disk dump option. */ + __be32 dd_block_size; + __be64 dd_block_offset; + __be64 dd_num_blocks; + __be32 dd_offset_disk_path; + + /* Maximum time allowed to prevent an automatic dump-reboot. */ + __be32 max_time_auto; +}; + +/* + * Firmware Assisted dump memory structure. 
This structure is required for + * registering future kernel dump with power firmware through rtas call. + * + * No disk dump option. Hence disk dump path string section is not included. + */ +struct rtas_fadump_mem_struct { + struct rtas_fadump_section_header header; + + /* Kernel dump sections */ + struct rtas_fadump_section cpu_state_data; + struct rtas_fadump_section hpte_region; + + /* + * TODO: Extend multiple boot memory regions support in the kernel + * for this platform. + */ + struct rtas_fadump_section rmr_region; +}; + +/* + * The firmware-assisted dump format. + * + * The register save area is an area in the partition's memory used to preserve + * the register contents (CPU state data) for the active CPUs during a firmware + * assisted dump. The dump format contains register save area header followed + * by register entries. Each list of registers for a CPU starts with "CPUSTRT" + * and ends with "CPUEND". + */ + +/* Register save area header. */ +struct rtas_fadump_reg_save_area_header { + __be64 magic_number; + __be32 version; + __be32 num_cpu_offset; +}; + +/* Register entry. */ +struct rtas_fadump_reg_entry { + __be64 reg_id; + __be64 reg_value; +}; + +/* Utility macros */ +#define RTAS_FADUMP_SKIP_TO_NEXT_CPU(reg_entry) \ +({ \ + while (be64_to_cpu(reg_entry->reg_id) != \ + fadump_str_to_u64("CPUEND")) \ + reg_entry++; \ + reg_entry++; \ +}) + +#define RTAS_FADUMP_CPU_ID_MASK ((1UL << 32) - 1) + +#endif /* _PSERIES_RTAS_FADUMP_H */ diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index f5940cc71c372f..f8adcd0e45893d 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -69,6 +69,7 @@ #include #include #include +#include #include "pseries.h" #include "../../../../drivers/pci/pci.h" @@ -141,17 +142,19 @@ static void __init fwnmi_init(void) } #ifdef CONFIG_PPC_BOOK3S_64 - /* Allocate per cpu slb area to save old slb contents during MCE */ - size = sizeof(struct slb_entry) * mmu_slb_size * nr_cpus; - slb_ptr = memblock_alloc_try_nid_raw(size, sizeof(struct slb_entry), - MEMBLOCK_LOW_LIMIT, ppc64_rma_size, - NUMA_NO_NODE); - if (!slb_ptr) - panic("Failed to allocate %zu bytes below %pa for slb area\n", - size, &ppc64_rma_size); - - for_each_possible_cpu(i) - paca_ptrs[i]->mce_faulty_slbs = slb_ptr + (mmu_slb_size * i); + if (!radix_enabled()) { + /* Allocate per cpu area to save old slb contents during MCE */ + size = sizeof(struct slb_entry) * mmu_slb_size * nr_cpus; + slb_ptr = memblock_alloc_try_nid_raw(size, + sizeof(struct slb_entry), MEMBLOCK_LOW_LIMIT, + ppc64_rma_size, NUMA_NO_NODE); + if (!slb_ptr) + panic("Failed to allocate %zu bytes below %pa for slb area\n", + size, &ppc64_rma_size); + + for_each_possible_cpu(i) + paca_ptrs[i]->mce_faulty_slbs = slb_ptr + (mmu_slb_size * i); + } #endif } @@ -297,8 +300,10 @@ static inline int alloc_dispatch_logs(void) static int alloc_dispatch_log_kmem_cache(void) { + void (*ctor)(void *) = get_dtl_cache_ctor(); + dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES, - DISPATCH_LOG_BYTES, 0, NULL); + DISPATCH_LOG_BYTES, 0, ctor); if (!dtl_cache) { pr_warn("Failed to create dispatch trace log buffer cache\n"); pr_warn("Stolen time statistics will be unreliable\n"); @@ -316,6 +321,9 @@ static void pseries_lpar_idle(void) * low power mode by ceding processor to hypervisor */ + if (!prep_irq_for_idle()) + return; + /* Indicate to hypervisor that we are idle. 
*/ get_lppaca()->idle = 1; diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 4b3ef8d9c63f97..ad61e90032da9c 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -41,6 +41,7 @@ #include #include #include +#include #include "pseries.h" #include "offline_states.h" @@ -221,7 +222,7 @@ static __init void pSeries_smp_probe_xics(void) { xics_smp_probe(); - if (cpu_has_feature(CPU_FTR_DBELL)) + if (cpu_has_feature(CPU_FTR_DBELL) && !is_secure_guest()) smp_ops->cause_ipi = smp_pseries_cause_ipi; else smp_ops->cause_ipi = icp_ops->cause_ipi; diff --git a/arch/powerpc/platforms/pseries/svm.c b/arch/powerpc/platforms/pseries/svm.c new file mode 100644 index 00000000000000..40c0637203d5bf --- /dev/null +++ b/arch/powerpc/platforms/pseries/svm.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Secure VM platform + * + * Copyright 2018 IBM Corporation + * Author: Anshuman Khandual + */ + +#include +#include +#include +#include +#include + +static int __init init_svm(void) +{ + if (!is_secure_guest()) + return 0; + + /* Don't release the SWIOTLB buffer. */ + ppc_swiotlb_enable = 1; + + /* + * Since the guest memory is inaccessible to the host, devices always + * need to use the SWIOTLB buffer for DMA even if dma_capable() says + * otherwise. + */ + swiotlb_force = SWIOTLB_FORCE; + + /* Share the SWIOTLB buffer with the host. */ + swiotlb_update_mem_attributes(); + + return 0; +} +machine_early_initcall(pseries, init_svm); + +int set_memory_encrypted(unsigned long addr, int numpages) +{ + if (!PAGE_ALIGNED(addr)) + return -EINVAL; + + uv_unshare_page(PHYS_PFN(__pa(addr)), numpages); + + return 0; +} + +int set_memory_decrypted(unsigned long addr, int numpages) +{ + if (!PAGE_ALIGNED(addr)) + return -EINVAL; + + uv_share_page(PHYS_PFN(__pa(addr)), numpages); + + return 0; +} + +/* There's one dispatch log per CPU. 
*/ +#define NR_DTL_PAGE (DISPATCH_LOG_BYTES * CONFIG_NR_CPUS / PAGE_SIZE) + +static struct page *dtl_page_store[NR_DTL_PAGE]; +static long dtl_nr_pages; + +static bool is_dtl_page_shared(struct page *page) +{ + long i; + + for (i = 0; i < dtl_nr_pages; i++) + if (dtl_page_store[i] == page) + return true; + + return false; +} + +void dtl_cache_ctor(void *addr) +{ + unsigned long pfn = PHYS_PFN(__pa(addr)); + struct page *page = pfn_to_page(pfn); + + if (!is_dtl_page_shared(page)) { + dtl_page_store[dtl_nr_pages] = page; + dtl_nr_pages++; + WARN_ON(dtl_nr_pages >= NR_DTL_PAGE); + uv_share_page(pfn, 1); + } +} diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c index 3473eef7628c37..79e2287991dbb8 100644 --- a/arch/powerpc/platforms/pseries/vio.c +++ b/arch/powerpc/platforms/pseries/vio.c @@ -1193,7 +1193,7 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) else tbl->it_ops = &iommu_table_pseries_ops; - return iommu_init_table(tbl, -1); + return iommu_init_table(tbl, -1, 0, 0); } /** diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig index d23288c4abf668..9ebcc13375603c 100644 --- a/arch/powerpc/sysdev/Kconfig +++ b/arch/powerpc/sysdev/Kconfig @@ -28,13 +28,6 @@ config PPC_MSI_BITMAP source "arch/powerpc/sysdev/xics/Kconfig" source "arch/powerpc/sysdev/xive/Kconfig" -config PPC_SCOM - bool - -config SCOM_DEBUGFS - bool "Expose SCOM controllers via debugfs" - depends on PPC_SCOM && DEBUG_FS - config GE_FPGA bool diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile index 9d73dfddf0600a..603b3c656d191e 100644 --- a/arch/powerpc/sysdev/Makefile +++ b/arch/powerpc/sysdev/Makefile @@ -49,8 +49,6 @@ ifdef CONFIG_SUSPEND obj-$(CONFIG_PPC_BOOK3S_32) += 6xx-suspend.o endif -obj-$(CONFIG_PPC_SCOM) += scom.o - obj-$(CONFIG_PPC_EARLY_DEBUG_MEMCONS) += udbg_memcons.o obj-$(CONFIG_PPC_XICS) += xics/ diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c index 21a1fae0714ea1..6b4a34b36d9879 100644 --- a/arch/powerpc/sysdev/dart_iommu.c +++ b/arch/powerpc/sysdev/dart_iommu.c @@ -344,7 +344,7 @@ static void iommu_table_dart_setup(void) iommu_table_dart.it_index = 0; iommu_table_dart.it_blocksize = 1; iommu_table_dart.it_ops = &iommu_dart_ops; - iommu_init_table(&iommu_table_dart, -1); + iommu_init_table(&iommu_table_dart, -1, 0, 0); /* Reserve the last page of the DART to avoid possible prefetch * past the DART mapped area diff --git a/arch/powerpc/sysdev/scom.c b/arch/powerpc/sysdev/scom.c deleted file mode 100644 index 94e885bf3aeec9..00000000000000 --- a/arch/powerpc/sysdev/scom.c +++ /dev/null @@ -1,223 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2010 Benjamin Herrenschmidt, IBM Corp - * - * and David Gibson, IBM Corporation. 
- */ - -#include -#include -#include -#include -#include -#include -#include - -const struct scom_controller *scom_controller; -EXPORT_SYMBOL_GPL(scom_controller); - -struct device_node *scom_find_parent(struct device_node *node) -{ - struct device_node *par, *tmp; - const u32 *p; - - for (par = of_node_get(node); par;) { - if (of_get_property(par, "scom-controller", NULL)) - break; - p = of_get_property(par, "scom-parent", NULL); - tmp = par; - if (p == NULL) - par = of_get_parent(par); - else - par = of_find_node_by_phandle(*p); - of_node_put(tmp); - } - return par; -} -EXPORT_SYMBOL_GPL(scom_find_parent); - -scom_map_t scom_map_device(struct device_node *dev, int index) -{ - struct device_node *parent; - unsigned int cells, size; - const __be32 *prop, *sprop; - u64 reg, cnt; - scom_map_t ret; - - parent = scom_find_parent(dev); - - if (parent == NULL) - return NULL; - - /* - * We support "scom-reg" properties for adding scom registers - * to a random device-tree node with an explicit scom-parent - * - * We also support the simple "reg" property if the device is - * a direct child of a scom controller. - * - * In case both exist, "scom-reg" takes precedence. - */ - prop = of_get_property(dev, "scom-reg", &size); - sprop = of_get_property(parent, "#scom-cells", NULL); - if (!prop && parent == dev->parent) { - prop = of_get_property(dev, "reg", &size); - sprop = of_get_property(parent, "#address-cells", NULL); - } - if (!prop) - return NULL; - cells = sprop ? be32_to_cpup(sprop) : 1; - size >>= 2; - - if (index >= (size / (2*cells))) - return NULL; - - reg = of_read_number(&prop[index * cells * 2], cells); - cnt = of_read_number(&prop[index * cells * 2 + cells], cells); - - ret = scom_map(parent, reg, cnt); - of_node_put(parent); - - return ret; -} -EXPORT_SYMBOL_GPL(scom_map_device); - -#ifdef CONFIG_SCOM_DEBUGFS -struct scom_debug_entry { - struct device_node *dn; - struct debugfs_blob_wrapper path; - char name[16]; -}; - -static ssize_t scom_debug_read(struct file *filp, char __user *ubuf, - size_t count, loff_t *ppos) -{ - struct scom_debug_entry *ent = filp->private_data; - u64 __user *ubuf64 = (u64 __user *)ubuf; - loff_t off = *ppos; - ssize_t done = 0; - u64 reg, reg_cnt, val; - scom_map_t map; - int rc; - - if (off < 0 || (off & 7) || (count & 7)) - return -EINVAL; - reg = off >> 3; - reg_cnt = count >> 3; - - map = scom_map(ent->dn, reg, reg_cnt); - if (!scom_map_ok(map)) - return -ENXIO; - - for (reg = 0; reg < reg_cnt; reg++) { - rc = scom_read(map, reg, &val); - if (!rc) - rc = put_user(val, ubuf64); - if (rc) { - if (!done) - done = rc; - break; - } - ubuf64++; - *ppos += 8; - done += 8; - } - scom_unmap(map); - return done; -} - -static ssize_t scom_debug_write(struct file* filp, const char __user *ubuf, - size_t count, loff_t *ppos) -{ - struct scom_debug_entry *ent = filp->private_data; - u64 __user *ubuf64 = (u64 __user *)ubuf; - loff_t off = *ppos; - ssize_t done = 0; - u64 reg, reg_cnt, val; - scom_map_t map; - int rc; - - if (off < 0 || (off & 7) || (count & 7)) - return -EINVAL; - reg = off >> 3; - reg_cnt = count >> 3; - - map = scom_map(ent->dn, reg, reg_cnt); - if (!scom_map_ok(map)) - return -ENXIO; - - for (reg = 0; reg < reg_cnt; reg++) { - rc = get_user(val, ubuf64); - if (!rc) - rc = scom_write(map, reg, val); - if (rc) { - if (!done) - done = rc; - break; - } - ubuf64++; - done += 8; - } - scom_unmap(map); - return done; -} - -static const struct file_operations scom_debug_fops = { - .read = scom_debug_read, - .write = scom_debug_write, - .open = simple_open, 
- .llseek = default_llseek, -}; - -static int scom_debug_init_one(struct dentry *root, struct device_node *dn, - int i) -{ - struct scom_debug_entry *ent; - struct dentry *dir; - - ent = kzalloc(sizeof(*ent), GFP_KERNEL); - if (!ent) - return -ENOMEM; - - ent->dn = of_node_get(dn); - snprintf(ent->name, 16, "%08x", i); - ent->path.data = (void*)kasprintf(GFP_KERNEL, "%pOF", dn); - ent->path.size = strlen((char *)ent->path.data); - - dir = debugfs_create_dir(ent->name, root); - if (!dir) { - of_node_put(dn); - kfree(ent->path.data); - kfree(ent); - return -1; - } - - debugfs_create_blob("devspec", 0400, dir, &ent->path); - debugfs_create_file("access", 0600, dir, ent, &scom_debug_fops); - - return 0; -} - -static int scom_debug_init(void) -{ - struct device_node *dn; - struct dentry *root; - int i, rc; - - root = debugfs_create_dir("scom", powerpc_debugfs_root); - if (!root) - return -1; - - i = rc = 0; - for_each_node_with_property(dn, "scom-controller") { - int id = of_get_ibm_chip_id(dn); - if (id == -1) - id = i; - rc |= scom_debug_init_one(root, dn, id); - i++; - } - - return rc; -} -device_initcall(scom_debug_init); -#endif /* CONFIG_SCOM_DEBUGFS */ diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index be86fce1a84e69..df832b09e3e9ce 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -196,7 +196,7 @@ static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek) /* * This is used to perform the magic loads from an ESB - * described in xive.h + * described in xive-regs.h */ static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset) { @@ -237,26 +237,61 @@ static notrace void xive_dump_eq(const char *name, struct xive_q *q) i0 = be32_to_cpup(q->qpage + idx); idx = (idx + 1) & q->msk; i1 = be32_to_cpup(q->qpage + idx); - xmon_printf(" %s Q T=%d %08x %08x ...\n", name, - q->toggle, i0, i1); + xmon_printf("%s idx=%d T=%d %08x %08x ...", name, + q->idx, q->toggle, i0, i1); } notrace void xmon_xive_do_dump(int cpu) { struct xive_cpu *xc = per_cpu(xive_cpu, cpu); - xmon_printf("XIVE state for CPU %d:\n", cpu); - xmon_printf(" pp=%02x cppr=%02x\n", xc->pending_prio, xc->cppr); - xive_dump_eq("IRQ", &xc->queue[xive_irq_priority]); + xmon_printf("CPU %d:", cpu); + if (xc) { + xmon_printf("pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr); + #ifdef CONFIG_SMP - { - u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET); - xmon_printf(" IPI state: %x:%c%c\n", xc->hw_ipi, - val & XIVE_ESB_VAL_P ? 'P' : 'p', - val & XIVE_ESB_VAL_Q ? 'Q' : 'q'); - } + { + u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET); + + xmon_printf("IPI=0x%08x PQ=%c%c ", xc->hw_ipi, + val & XIVE_ESB_VAL_P ? 'P' : '-', + val & XIVE_ESB_VAL_Q ? 'Q' : '-'); + } #endif + xive_dump_eq("EQ", &xc->queue[xive_irq_priority]); + } + xmon_printf("\n"); +} + +int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d) +{ + int rc; + u32 target; + u8 prio; + u32 lirq; + + rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq); + if (rc) { + xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc); + return rc; + } + + xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ", + hw_irq, target, prio, lirq); + + if (d) { + struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); + u64 val = xive_esb_read(xd, XIVE_ESB_GET); + + xmon_printf("PQ=%c%c", + val & XIVE_ESB_VAL_P ? 'P' : '-', + val & XIVE_ESB_VAL_Q ? 
'Q' : '-'); + } + + xmon_printf("\n"); + return 0; } + #endif /* CONFIG_XMON */ static unsigned int xive_get_irq(void) diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index 37987c815913a9..0ff6b739052c8f 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -111,6 +111,20 @@ int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq) } EXPORT_SYMBOL_GPL(xive_native_configure_irq); +static int xive_native_get_irq_config(u32 hw_irq, u32 *target, u8 *prio, + u32 *sw_irq) +{ + s64 rc; + __be64 vp; + __be32 lirq; + + rc = opal_xive_get_irq_config(hw_irq, &vp, prio, &lirq); + + *target = be64_to_cpu(vp); + *sw_irq = be32_to_cpu(lirq); + + return rc == 0 ? 0 : -ENXIO; +} /* This can be called multiple time to change a queue configuration */ int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio, @@ -231,6 +245,17 @@ static bool xive_native_match(struct device_node *node) return of_device_is_compatible(node, "ibm,opal-xive-vc"); } +static s64 opal_xive_allocate_irq(u32 chip_id) +{ + s64 irq = opal_xive_allocate_irq_raw(chip_id); + + /* + * Old versions of skiboot can incorrectly return 0xffffffff to + * indicate no space, fix it up here. + */ + return irq == 0xffffffff ? OPAL_RESOURCE : irq; +} + #ifdef CONFIG_SMP static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc) { @@ -442,6 +467,7 @@ EXPORT_SYMBOL_GPL(xive_native_sync_queue); static const struct xive_ops xive_native_ops = { .populate_irq_data = xive_native_populate_irq_data, .configure_irq = xive_native_configure_irq, + .get_irq_config = xive_native_get_irq_config, .setup_queue = xive_native_setup_queue, .cleanup_queue = xive_native_cleanup_queue, .match = xive_native_match, diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c index 8ef9cf4ebb1c60..33c10749edec99 100644 --- a/arch/powerpc/sysdev/xive/spapr.c +++ b/arch/powerpc/sysdev/xive/spapr.c @@ -45,7 +45,7 @@ static int xive_irq_bitmap_add(int base, int count) { struct xive_irq_bitmap *xibm; - xibm = kzalloc(sizeof(*xibm), GFP_ATOMIC); + xibm = kzalloc(sizeof(*xibm), GFP_KERNEL); if (!xibm) return -ENOMEM; @@ -53,6 +53,10 @@ static int xive_irq_bitmap_add(int base, int count) xibm->base = base; xibm->count = count; xibm->bitmap = kzalloc(xibm->count, GFP_KERNEL); + if (!xibm->bitmap) { + kfree(xibm); + return -ENOMEM; + } list_add(&xibm->list, &xive_irq_bitmaps); pr_info("Using IRQ range [%x-%x]", xibm->base, @@ -211,6 +215,38 @@ static long plpar_int_set_source_config(unsigned long flags, return 0; } +static long plpar_int_get_source_config(unsigned long flags, + unsigned long lisn, + unsigned long *target, + unsigned long *prio, + unsigned long *sw_irq) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + long rc; + + pr_devel("H_INT_GET_SOURCE_CONFIG flags=%lx lisn=%lx\n", flags, lisn); + + do { + rc = plpar_hcall(H_INT_GET_SOURCE_CONFIG, retbuf, flags, lisn, + target, prio, sw_irq); + } while (plpar_busy_delay(rc)); + + if (rc) { + pr_err("H_INT_GET_SOURCE_CONFIG lisn=%ld failed %ld\n", + lisn, rc); + return rc; + } + + *target = retbuf[0]; + *prio = retbuf[1]; + *sw_irq = retbuf[2]; + + pr_devel("H_INT_GET_SOURCE_CONFIG target=%lx prio=%lx sw_irq=%lx\n", + retbuf[0], retbuf[1], retbuf[2]); + + return 0; +} + static long plpar_int_get_queue_info(unsigned long flags, unsigned long target, unsigned long priority, @@ -394,6 +430,24 @@ static int xive_spapr_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq) return rc == 0 ? 
0 : -ENXIO; } +static int xive_spapr_get_irq_config(u32 hw_irq, u32 *target, u8 *prio, + u32 *sw_irq) +{ + long rc; + unsigned long h_target; + unsigned long h_prio; + unsigned long h_sw_irq; + + rc = plpar_int_get_source_config(0, hw_irq, &h_target, &h_prio, + &h_sw_irq); + + *target = h_target; + *prio = h_prio; + *sw_irq = h_sw_irq; + + return rc == 0 ? 0 : -ENXIO; +} + /* This can be called multiple time to change a queue configuration */ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio, __be32 *qpage, u32 order) @@ -586,6 +640,7 @@ static void xive_spapr_sync_source(u32 hw_irq) static const struct xive_ops xive_spapr_ops = { .populate_irq_data = xive_spapr_populate_irq_data, .configure_irq = xive_spapr_configure_irq, + .get_irq_config = xive_spapr_get_irq_config, .setup_queue = xive_spapr_setup_queue, .cleanup_queue = xive_spapr_cleanup_queue, .match = xive_spapr_match, diff --git a/arch/powerpc/sysdev/xive/xive-internal.h b/arch/powerpc/sysdev/xive/xive-internal.h index 211725dbf364ca..59cd366e7933a7 100644 --- a/arch/powerpc/sysdev/xive/xive-internal.h +++ b/arch/powerpc/sysdev/xive/xive-internal.h @@ -33,6 +33,8 @@ struct xive_cpu { struct xive_ops { int (*populate_irq_data)(u32 hw_irq, struct xive_irq_data *data); int (*configure_irq)(u32 hw_irq, u32 target, u8 prio, u32 sw_irq); + int (*get_irq_config)(u32 hw_irq, u32 *target, u8 *prio, + u32 *sw_irq); int (*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio); void (*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio); void (*setup_cpu)(unsigned int cpu, struct xive_cpu *xc); diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 14e56c25879fae..d83364ebc5c5ad 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -2534,13 +2534,16 @@ static void dump_pacas(void) static void dump_one_xive(int cpu) { unsigned int hwid = get_hard_smp_processor_id(cpu); - - opal_xive_dump(XIVE_DUMP_TM_HYP, hwid); - opal_xive_dump(XIVE_DUMP_TM_POOL, hwid); - opal_xive_dump(XIVE_DUMP_TM_OS, hwid); - opal_xive_dump(XIVE_DUMP_TM_USER, hwid); - opal_xive_dump(XIVE_DUMP_VP, hwid); - opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid); + bool hv = cpu_has_feature(CPU_FTR_HVMODE); + + if (hv) { + opal_xive_dump(XIVE_DUMP_TM_HYP, hwid); + opal_xive_dump(XIVE_DUMP_TM_POOL, hwid); + opal_xive_dump(XIVE_DUMP_TM_OS, hwid); + opal_xive_dump(XIVE_DUMP_TM_USER, hwid); + opal_xive_dump(XIVE_DUMP_VP, hwid); + opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid); + } if (setjmp(bus_error_jmp) != 0) { catch_memory_errors = 0; @@ -2569,16 +2572,28 @@ static void dump_all_xives(void) dump_one_xive(cpu); } -static void dump_one_xive_irq(u32 num) +static void dump_one_xive_irq(u32 num, struct irq_data *d) +{ + xmon_xive_get_irq_config(num, d); +} + +static void dump_all_xive_irq(void) { - s64 rc; - __be64 vp; - u8 prio; - __be32 lirq; - - rc = opal_xive_get_irq_config(num, &vp, &prio, &lirq); - xmon_printf("IRQ 0x%x config: vp=0x%llx prio=%d lirq=0x%x (rc=%lld)\n", - num, be64_to_cpu(vp), prio, be32_to_cpu(lirq), rc); + unsigned int i; + struct irq_desc *desc; + + for_each_irq_desc(i, desc) { + struct irq_data *d = irq_desc_get_irq_data(desc); + unsigned int hwirq; + + if (!d) + continue; + + hwirq = (unsigned int)irqd_to_hwirq(d); + /* IPIs are special (HW number 0) */ + if (hwirq) + dump_one_xive_irq(hwirq, d); + } } static void dump_xives(void) @@ -2597,7 +2612,9 @@ static void dump_xives(void) return; } else if (c == 'i') { if (scanhex(&num)) - dump_one_xive_irq(num); + dump_one_xive_irq(num, NULL); + else + 
dump_all_xive_irq(); return; } diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 1211543c330ccc..71d29fb4008a7f 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -31,6 +31,7 @@ config RISCV select GENERIC_SMP_IDLE_THREAD select GENERIC_ATOMIC64 if !64BIT select HAVE_ARCH_AUDITSYSCALL + select HAVE_ASM_MODVERSIONS select HAVE_MEMBLOCK_NODE_MAP select HAVE_DMA_CONTIGUOUS select HAVE_FUTEX_CMPXCHG if FUTEX diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 4f0a3d2018d2db..f5e91421024546 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -52,7 +52,7 @@ ifeq ($(CONFIG_CMODEL_MEDANY),y) KBUILD_CFLAGS += -mcmodel=medany endif ifeq ($(CONFIG_MODULE_SECTIONS),y) - KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/riscv/kernel/module.lds + KBUILD_LDS_MODULE += $(srctree)/arch/riscv/kernel/module.lds endif ifeq ($(CONFIG_PERF_EVENTS),y) KBUILD_CFLAGS += -fno-omit-frame-pointer diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 8c5b05d911067b..f933a473b128e3 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -1,7 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -config ARCH_HAS_MEM_ENCRYPT - def_bool y - config MMU def_bool y @@ -68,6 +65,7 @@ config S390 select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_GIGANTIC_PAGE select ARCH_HAS_KCOV + select ARCH_HAS_MEM_ENCRYPT select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SET_MEMORY select ARCH_HAS_STRICT_KERNEL_RWX @@ -132,6 +130,7 @@ config S390 select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRANSPARENT_HUGEPAGE select HAVE_ARCH_VMAP_STACK + select HAVE_ASM_MODVERSIONS select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES select HAVE_CMPXCHG_DOUBLE select HAVE_CMPXCHG_LOCAL diff --git a/arch/s390/include/asm/mem_encrypt.h b/arch/s390/include/asm/mem_encrypt.h index 3eb018508190e2..2542cbf7e2d180 100644 --- a/arch/s390/include/asm/mem_encrypt.h +++ b/arch/s390/include/asm/mem_encrypt.h @@ -4,10 +4,7 @@ #ifndef __ASSEMBLY__ -#define sme_me_mask 0ULL - -static inline bool sme_active(void) { return false; } -extern bool sev_active(void); +static inline bool mem_encrypt_active(void) { return false; } int set_memory_encrypted(unsigned long addr, int numpages); int set_memory_decrypted(unsigned long addr, int numpages); diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 20340a03ad90c5..a124f19f7b3cc4 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -156,14 +156,9 @@ int set_memory_decrypted(unsigned long addr, int numpages) } /* are we a protected virtualization guest? 
*/ -bool sev_active(void) -{ - return is_prot_virt_guest(); -} - bool force_dma_unencrypted(struct device *dev) { - return sev_active(); + return is_prot_virt_guest(); } /* protected virtualization */ diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 7926a2e11bdc2d..fbc1aecf0f94ce 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -17,6 +17,7 @@ config SPARC select ARCH_MIGHT_HAVE_PC_SERIO select OF select OF_PROMTREE + select HAVE_ASM_MODVERSIONS select HAVE_IDE select HAVE_OPROFILE select HAVE_ARCH_KGDB if !SMP || SPARC64 diff --git a/arch/um/Kconfig b/arch/um/Kconfig index 3c3adfc486f21d..fec6b4ca2b6e54 100644 --- a/arch/um/Kconfig +++ b/arch/um/Kconfig @@ -9,6 +9,7 @@ config UML select ARCH_NO_PREEMPT select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_SECCOMP_FILTER + select HAVE_ASM_MODVERSIONS select HAVE_UID16 select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_DEBUG_KMEMLEAK diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 039fd7abc557a9..98c23f2e6b2999 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -68,6 +68,7 @@ config X86 select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_KCOV if X86_64 + select ARCH_HAS_MEM_ENCRYPT select ARCH_HAS_MEMBARRIER_SYNC_CORE select ARCH_HAS_PMEM_API if X86_64 select ARCH_HAS_PTE_DEVMAP if X86_64 @@ -147,6 +148,7 @@ config X86 select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64 select HAVE_ARCH_VMAP_STACK if X86_64 select HAVE_ARCH_WITHIN_STACK_FRAMES + select HAVE_ASM_MODVERSIONS select HAVE_CMPXCHG_DOUBLE select HAVE_CMPXCHG_LOCAL select HAVE_CONTEXT_TRACKING if X86_64 @@ -1526,9 +1528,6 @@ config X86_CPA_STATISTICS helps to determine the effectiveness of preserving large and huge page mappings when mapping protections are changed. -config ARCH_HAS_MEM_ENCRYPT - def_bool y - config AMD_MEM_ENCRYPT bool "AMD Secure Memory Encryption (SME) support" depends on X86_64 && CPU_SUP_AMD diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index 8df54913819380..0f2154106d0168 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile @@ -89,6 +89,7 @@ $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS # CFLAGS_REMOVE_vdso-note.o = -pg CFLAGS_REMOVE_vclock_gettime.o = -pg +CFLAGS_REMOVE_vdso32/vclock_gettime.o = -pg CFLAGS_REMOVE_vgetcpu.o = -pg CFLAGS_REMOVE_vvar.o = -pg @@ -128,7 +129,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE $(obj)/vdsox32.so.dbg: $(obj)/vdsox32.lds $(vobjx32s) FORCE $(call if_changed,vdso_and_check) -CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds) +CPPFLAGS_vdso32/vdso32.lds = $(CPPFLAGS_vdso.lds) VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -soname linux-gate.so.1 targets += vdso32/vdso32.lds diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index 0c196c47d6215f..848ce43b9040d6 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -92,6 +92,16 @@ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[]; +static inline bool mem_encrypt_active(void) +{ + return sme_me_mask; +} + +static inline u64 sme_get_me_mask(void) +{ + return sme_me_mask; +} + #endif /* __ASSEMBLY__ */ #endif /* __X86_MEM_ENCRYPT_H__ */ diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c index 22369dd5de3b87..045e82e8945b36 100644 --- a/arch/x86/kernel/crash_dump_64.c +++ b/arch/x86/kernel/crash_dump_64.c @@ -70,3 +70,8 @@ ssize_t 
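With ARCH_HAS_MEM_ENCRYPT now a Kconfig symbol selected by s390 and x86, and each of those architectures providing mem_encrypt_active(), generic code no longer needs the x86-only sme_active()/sev_active() pair (whose exports are dropped in the x86/mm/mem_encrypt.c hunk below). A minimal sketch of a cross-arch caller, assuming <linux/mem_encrypt.h> pulls in the per-arch helper as in mainline; the function name is illustrative:

#include <linux/mem_encrypt.h>

static bool need_unencrypted_buffer(void)
{
	/* x86: true while sme_me_mask is non-zero; s390: always false (see hunks above) */
	return mem_encrypt_active();
}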
copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize, { return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, true); } + +ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos) +{ + return read_from_oldmem(buf, count, ppos, 0, sev_active()); +} diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index fece30ca8b0cb9..9268c12458c848 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -344,13 +344,11 @@ bool sme_active(void) { return sme_me_mask && !sev_enabled; } -EXPORT_SYMBOL(sme_active); bool sev_active(void) { return sme_me_mask && sev_enabled; } -EXPORT_SYMBOL(sev_active); /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */ bool force_dma_unencrypted(struct device *dev) diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 2db474ab4c6bea..9207ac2913412e 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -1,14 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 /* * ti-sysc.c - Texas Instruments sysc interconnect target driver - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include @@ -62,18 +54,26 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = { * @module_size: size of the interconnect target module * @module_va: virtual address of the interconnect target module * @offsets: register offsets from module base + * @mdata: ti-sysc to hwmod translation data for a module * @clocks: clocks used by the interconnect target module * @clock_roles: clock role names for the found clocks * @nr_clocks: number of clocks used by the interconnect target module + * @rsts: resets used by the interconnect target module * @legacy_mode: configured for legacy mode if set * @cap: interconnect target module capabilities * @cfg: interconnect target module configuration + * @cookie: data used by legacy platform callbacks * @name: name if available * @revision: interconnect target module revision + * @enabled: sysc runtime enabled status * @needs_resume: runtime resume needed on resume from suspend + * @child_needs_resume: runtime resume needed for child on resume from suspend + * @disable_on_idle: status flag used for disabling modules with resets + * @idle_work: work structure used to perform delayed idle on a module * @clk_enable_quirk: module specific clock enable quirk * @clk_disable_quirk: module specific clock disable quirk * @reset_done_quirk: module specific reset done quirk + * @module_enable_quirk: module specific enable quirk */ struct sysc { struct device *dev; @@ -95,11 +95,11 @@ struct sysc { unsigned int enabled:1; unsigned int needs_resume:1; unsigned int child_needs_resume:1; - unsigned int disable_on_idle:1; struct delayed_work idle_work; void (*clk_enable_quirk)(struct sysc *sysc); void (*clk_disable_quirk)(struct sysc *sysc); void (*reset_done_quirk)(struct sysc *sysc); + void (*module_enable_quirk)(struct sysc *sysc); }; static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np, @@ -503,7 +503,7 @@ static void sysc_clkdm_allow_idle(struct sysc *ddata) static int sysc_init_resets(struct sysc *ddata) { ddata->rsts = - 
devm_reset_control_get_optional(ddata->dev, "rstctrl"); + devm_reset_control_get_optional_shared(ddata->dev, "rstctrl"); if (IS_ERR(ddata->rsts)) return PTR_ERR(ddata->rsts); @@ -615,8 +615,8 @@ static void sysc_check_quirk_stdout(struct sysc *ddata, * node but children have "ti,hwmods". These belong to the interconnect * target node and are managed by this driver. */ -static int sysc_check_one_child(struct sysc *ddata, - struct device_node *np) +static void sysc_check_one_child(struct sysc *ddata, + struct device_node *np) { const char *name; @@ -626,22 +626,14 @@ static int sysc_check_one_child(struct sysc *ddata, sysc_check_quirk_stdout(ddata, np); sysc_parse_dts_quirks(ddata, np, true); - - return 0; } -static int sysc_check_children(struct sysc *ddata) +static void sysc_check_children(struct sysc *ddata) { struct device_node *child; - int error; - - for_each_child_of_node(ddata->dev->of_node, child) { - error = sysc_check_one_child(ddata, child); - if (error) - return error; - } - return 0; + for_each_child_of_node(ddata->dev->of_node, child) + sysc_check_one_child(ddata, child); } /* @@ -794,9 +786,7 @@ static int sysc_map_and_check_registers(struct sysc *ddata) if (error) return error; - error = sysc_check_children(ddata); - if (error) - return error; + sysc_check_children(ddata); error = sysc_parse_registers(ddata); if (error) @@ -940,6 +930,9 @@ static int sysc_enable_module(struct device *dev) sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); } + if (ddata->module_enable_quirk) + ddata->module_enable_quirk(ddata); + return 0; } @@ -1031,8 +1024,7 @@ static int __maybe_unused sysc_runtime_suspend_legacy(struct device *dev, dev_err(dev, "%s: could not idle: %i\n", __func__, error); - if (ddata->disable_on_idle) - reset_control_assert(ddata->rsts); + reset_control_assert(ddata->rsts); return 0; } @@ -1043,8 +1035,7 @@ static int __maybe_unused sysc_runtime_resume_legacy(struct device *dev, struct ti_sysc_platform_data *pdata; int error; - if (ddata->disable_on_idle) - reset_control_deassert(ddata->rsts); + reset_control_deassert(ddata->rsts); pdata = dev_get_platdata(ddata->dev); if (!pdata) @@ -1091,10 +1082,9 @@ static int __maybe_unused sysc_runtime_suspend(struct device *dev) ddata->enabled = false; err_allow_idle: - sysc_clkdm_allow_idle(ddata); + reset_control_assert(ddata->rsts); - if (ddata->disable_on_idle) - reset_control_assert(ddata->rsts); + sysc_clkdm_allow_idle(ddata); return error; } @@ -1109,11 +1099,11 @@ static int __maybe_unused sysc_runtime_resume(struct device *dev) if (ddata->enabled) return 0; - if (ddata->disable_on_idle) - reset_control_deassert(ddata->rsts); sysc_clkdm_deny_idle(ddata); + reset_control_deassert(ddata->rsts); + if (sysc_opt_clks_needed(ddata)) { error = sysc_enable_opt_clocks(ddata); if (error) @@ -1256,6 +1246,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_MODULE_QUIRK_I2C), SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0, SYSC_MODULE_QUIRK_I2C), + SYSC_QUIRK("gpu", 0x50000000, 0x14, -1, -1, 0x00010201, 0xffffffff, 0), + SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff, + SYSC_MODULE_QUIRK_SGX), SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0, SYSC_MODULE_QUIRK_WDT), @@ -1271,8 +1264,11 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0x4edb1902, 0xffffffff, 0), SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0), SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0), + 
SYSC_QUIRK("d2d", 0x4a0b6000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), + SYSC_QUIRK("d2d", 0x4a0cd000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0), SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -1, 0, 0, 0), + SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff, 0), SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0), SYSC_QUIRK("iss", 0, 0, 0x10, -1, 0x40000101, 0xffffffff, 0), SYSC_QUIRK("lcdc", 0, 0, 0x54, -1, 0x4f201000, 0xffffffff, 0), @@ -1424,6 +1420,15 @@ static void sysc_clk_disable_quirk_i2c(struct sysc *ddata) sysc_clk_quirk_i2c(ddata, false); } +/* 36xx SGX needs a quirk for to bypass OCP IPG interrupt logic */ +static void sysc_module_enable_quirk_sgx(struct sysc *ddata) +{ + int offset = 0xff08; /* OCP_DEBUG_CONFIG */ + u32 val = BIT(31); /* THALIA_INT_BYPASS */ + + sysc_write(ddata, offset, val); +} + /* Watchdog timer needs a disable sequence after reset */ static void sysc_reset_done_quirk_wdt(struct sysc *ddata) { @@ -1466,6 +1471,9 @@ static void sysc_init_module_quirks(struct sysc *ddata) return; } + if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX) + ddata->module_enable_quirk = sysc_module_enable_quirk_sgx; + if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT) ddata->reset_done_quirk = sysc_reset_done_quirk_wdt; } @@ -1532,7 +1540,7 @@ static int sysc_legacy_init(struct sysc *ddata) */ static int sysc_rstctrl_reset_deassert(struct sysc *ddata, bool reset) { - int error, val; + int error; if (!ddata->rsts) return 0; @@ -1543,14 +1551,9 @@ static int sysc_rstctrl_reset_deassert(struct sysc *ddata, bool reset) return error; } - error = reset_control_deassert(ddata->rsts); - if (error == -EEXIST) - return 0; + reset_control_deassert(ddata->rsts); - error = readx_poll_timeout(reset_control_status, ddata->rsts, val, - val == 0, 100, MAX_MODULE_SOFTRESET_WAIT); - - return error; + return 0; } /* @@ -1559,12 +1562,11 @@ static int sysc_rstctrl_reset_deassert(struct sysc *ddata, bool reset) */ static int sysc_reset(struct sysc *ddata) { - int sysc_offset, syss_offset, sysc_val, rstval, quirks, error = 0; + int sysc_offset, syss_offset, sysc_val, rstval, error = 0; u32 sysc_mask, syss_done; sysc_offset = ddata->offsets[SYSC_SYSCONFIG]; syss_offset = ddata->offsets[SYSC_SYSSTATUS]; - quirks = ddata->cfg.quirks; if (ddata->legacy_mode || sysc_offset < 0 || ddata->cap->regbits->srst_shift < 0 || @@ -2427,6 +2429,10 @@ static int sysc_probe(struct platform_device *pdev) goto unprepare; } + /* Balance reset counts */ + if (ddata->rsts) + reset_control_assert(ddata->rsts); + sysc_show_registers(ddata); ddata->dev->type = &sysc_device_type; @@ -2446,9 +2452,6 @@ static int sysc_probe(struct platform_device *pdev) pm_runtime_put(&pdev->dev); } - if (!of_get_available_child_count(ddata->dev->of_node)) - ddata->disable_on_idle = true; - return 0; err: diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 801fa1cd03217c..c44247d0b83e8a 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -116,7 +116,6 @@ config COMMON_CLK_SI514 depends on OF select REGMAP_I2C help - ---help--- This driver supports the Silicon Labs 514 programmable clock generator. @@ -125,7 +124,6 @@ config COMMON_CLK_SI544 depends on I2C select REGMAP_I2C help - ---help--- This driver supports the Silicon Labs 544 programmable clock generator. @@ -135,7 +133,6 @@ config COMMON_CLK_SI570 depends on OF select REGMAP_I2C help - ---help--- This driver supports Silicon Labs 570/571/598/599 programmable clock generators. 
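The ti-sysc changes above add a module_enable_quirk hook that sysc_enable_module() calls after writing SYSCONFIG; the SGX variant uses it to set THALIA_INT_BYPASS in OCP_DEBUG_CONFIG. A sketch of how a further module-specific enable quirk could be hooked up with the same pattern; the quirk name, flag and register values below are hypothetical placeholders, not part of this patch:

/* Hypothetical enable quirk following the SGX example above */
static void sysc_module_enable_quirk_example(struct sysc *ddata)
{
	/* 0x10 and BIT(0) are placeholder offset and bit, not a real module's values */
	sysc_write(ddata, 0x10, BIT(0));
}

/*
 * Wired up in sysc_init_module_quirks(), e.g.:
 *
 *	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_EXAMPLE)
 *		ddata->module_enable_quirk = sysc_module_enable_quirk_example;
 */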
@@ -153,7 +150,6 @@ config COMMON_CLK_CDCE925 depends on OF select REGMAP_I2C help - ---help--- This driver supports the TI CDCE913/925/937/949 programmable clock synthesizer. Each chip has different number of PLLs and outputs. For example, the CDCE925 contains two PLLs with spread-spectrum @@ -212,7 +208,6 @@ config COMMON_CLK_AXI_CLKGEN tristate "AXI clkgen driver" depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST help - ---help--- Support for the Analog Devices axi-clkgen pcore clock generator for Xilinx FPGAs. It is commonly used in Analog Devices' reference designs. @@ -279,26 +274,22 @@ config COMMON_CLK_VC5 depends on OF select REGMAP_I2C help - ---help--- This driver supports the IDT VersaClock 5 and VersaClock 6 programmable clock generators. config COMMON_CLK_STM32MP157 def_bool COMMON_CLK && MACH_STM32MP157 help - ---help--- Support for stm32mp157 SoC family clocks config COMMON_CLK_STM32F def_bool COMMON_CLK && (MACH_STM32F429 || MACH_STM32F469 || MACH_STM32F746) help - ---help--- Support for stm32f4 and stm32f7 SoC families clocks config COMMON_CLK_STM32H7 def_bool COMMON_CLK && MACH_STM32H743 help - ---help--- Support for stm32h7 SoC family clocks config COMMON_CLK_BD718XX diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index 0cad76021297f4..0138fb14e6f883 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -30,6 +30,7 @@ obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o obj-$(CONFIG_COMMON_CLK_FIXED_MMIO) += clk-fixed-mmio.o obj-$(CONFIG_COMMON_CLK_GEMINI) += clk-gemini.o obj-$(CONFIG_COMMON_CLK_ASPEED) += clk-aspeed.o +obj-$(CONFIG_MACH_ASPEED_G6) += clk-ast2600.o obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o obj-$(CONFIG_CLK_HSDK) += clk-hsdk-pll.o obj-$(CONFIG_COMMON_CLK_LOCHNAGAR) += clk-lochnagar.o diff --git a/drivers/clk/actions/owl-common.c b/drivers/clk/actions/owl-common.c index 32dd29e0a37e18..4de97cc7cb54d5 100644 --- a/drivers/clk/actions/owl-common.c +++ b/drivers/clk/actions/owl-common.c @@ -68,16 +68,17 @@ int owl_clk_probe(struct device *dev, struct clk_hw_onecell_data *hw_clks) struct clk_hw *hw; for (i = 0; i < hw_clks->num; i++) { + const char *name; hw = hw_clks->hws[i]; - if (IS_ERR_OR_NULL(hw)) continue; + name = hw->init->name; ret = devm_clk_hw_register(dev, hw); if (ret) { dev_err(dev, "Couldn't register clock %d - %s\n", - i, hw->init->name); + i, name); return ret; } } diff --git a/drivers/clk/actions/owl-factor.c b/drivers/clk/actions/owl-factor.c index 317d4a9e112ee9..f15e2621fa185e 100644 --- a/drivers/clk/actions/owl-factor.c +++ b/drivers/clk/actions/owl-factor.c @@ -64,11 +64,10 @@ static unsigned int _get_table_val(const struct clk_factor_table *table, return val; } -static int clk_val_best(struct clk_hw *hw, unsigned long rate, +static int owl_clk_val_best(const struct owl_factor_hw *factor_hw, + struct clk_hw *hw, unsigned long rate, unsigned long *best_parent_rate) { - struct owl_factor *factor = hw_to_owl_factor(hw); - struct owl_factor_hw *factor_hw = &factor->factor_hw; const struct clk_factor_table *clkt = factor_hw->table; unsigned long parent_rate, try_parent_rate, best = 0, cur_rate; unsigned long parent_rate_saved = *best_parent_rate; @@ -126,7 +125,7 @@ long owl_factor_helper_round_rate(struct owl_clk_common *common, const struct clk_factor_table *clkt = factor_hw->table; unsigned int val, mul = 0, div = 1; - val = clk_val_best(&common->hw, rate, parent_rate); + val = owl_clk_val_best(factor_hw, &common->hw, rate, parent_rate); _get_table_div_mul(clkt, val, &mul, &div); return *parent_rate * mul / div; diff --git 
a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c index f607ee702c8384..87083b3a2769d2 100644 --- a/drivers/clk/at91/clk-main.c +++ b/drivers/clk/at91/clk-main.c @@ -21,6 +21,10 @@ #define MOR_KEY_MASK (0xff << 16) +#define clk_main_parent_select(s) (((s) & \ + (AT91_PMC_MOSCEN | \ + AT91_PMC_OSCBYPASS)) ? 1 : 0) + struct clk_main_osc { struct clk_hw hw; struct regmap *regmap; @@ -113,7 +117,7 @@ static int clk_main_osc_is_prepared(struct clk_hw *hw) regmap_read(regmap, AT91_PMC_SR, &status); - return (status & AT91_PMC_MOSCS) && (tmp & AT91_PMC_MOSCEN); + return (status & AT91_PMC_MOSCS) && clk_main_parent_select(tmp); } static const struct clk_ops main_osc_ops = { @@ -152,7 +156,7 @@ at91_clk_register_main_osc(struct regmap *regmap, if (bypass) regmap_update_bits(regmap, AT91_CKGR_MOR, MOR_KEY_MASK | - AT91_PMC_MOSCEN, + AT91_PMC_OSCBYPASS, AT91_PMC_OSCBYPASS | AT91_PMC_KEY); hw = &osc->hw; @@ -450,7 +454,7 @@ static u8 clk_sam9x5_main_get_parent(struct clk_hw *hw) regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status); - return status & AT91_PMC_MOSCEN ? 1 : 0; + return clk_main_parent_select(status); } static const struct clk_ops sam9x5_main_ops = { @@ -492,7 +496,7 @@ at91_clk_register_sam9x5_main(struct regmap *regmap, clkmain->hw.init = &init; clkmain->regmap = regmap; regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status); - clkmain->parent = status & AT91_PMC_MOSCEN ? 1 : 0; + clkmain->parent = clk_main_parent_select(status); hw = &clkmain->hw; ret = clk_hw_register(NULL, &clkmain->hw); diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c index 6509d09348048f..0de1108737db93 100644 --- a/drivers/clk/at91/sama5d2.c +++ b/drivers/clk/at91/sama5d2.c @@ -21,7 +21,7 @@ static const struct clk_range plla_outputs[] = { }; static const struct clk_pll_characteristics plla_characteristics = { - .input = { .min = 12000000, .max = 12000000 }, + .input = { .min = 12000000, .max = 24000000 }, .num_output = ARRAY_SIZE(plla_outputs), .output = plla_outputs, .icpll = plla_icpll, diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index 867ae3c20041cd..802e488fd3c3d5 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c @@ -31,7 +31,7 @@ #include #include #include -#include +#include #include #include #include @@ -114,6 +114,8 @@ #define CM_AVEODIV 0x1bc #define CM_EMMCCTL 0x1c0 #define CM_EMMCDIV 0x1c4 +#define CM_EMMC2CTL 0x1d0 +#define CM_EMMC2DIV 0x1d4 /* General bits for the CM_*CTL regs */ # define CM_ENABLE BIT(4) @@ -289,6 +291,10 @@ #define LOCK_TIMEOUT_NS 100000000 #define BCM2835_MAX_FB_RATE 1750000000u +#define SOC_BCM2835 BIT(0) +#define SOC_BCM2711 BIT(1) +#define SOC_ALL (SOC_BCM2835 | SOC_BCM2711) + /* * Names of clocks used within the driver that need to be replaced * with an external parent's name. This array is in the order that @@ -320,6 +326,10 @@ struct bcm2835_cprman { struct clk_hw_onecell_data onecell; }; +struct cprman_plat_data { + unsigned int soc; +}; + static inline void cprman_write(struct bcm2835_cprman *cprman, u32 reg, u32 val) { writel(CM_PASSWORD | val, cprman->regs + reg); @@ -1451,22 +1461,28 @@ typedef struct clk_hw *(*bcm2835_clk_register)(struct bcm2835_cprman *cprman, const void *data); struct bcm2835_clk_desc { bcm2835_clk_register clk_register; + unsigned int supported; const void *data; }; /* assignment helper macros for different clock types */ -#define _REGISTER(f, ...) { .clk_register = (bcm2835_clk_register)f, \ - .data = __VA_ARGS__ } -#define REGISTER_PLL(...) 
_REGISTER(&bcm2835_register_pll, \ +#define _REGISTER(f, s, ...) { .clk_register = (bcm2835_clk_register)f, \ + .supported = s, \ + .data = __VA_ARGS__ } +#define REGISTER_PLL(s, ...) _REGISTER(&bcm2835_register_pll, \ + s, \ &(struct bcm2835_pll_data) \ {__VA_ARGS__}) -#define REGISTER_PLL_DIV(...) _REGISTER(&bcm2835_register_pll_divider, \ - &(struct bcm2835_pll_divider_data) \ - {__VA_ARGS__}) -#define REGISTER_CLK(...) _REGISTER(&bcm2835_register_clock, \ +#define REGISTER_PLL_DIV(s, ...) _REGISTER(&bcm2835_register_pll_divider, \ + s, \ + &(struct bcm2835_pll_divider_data) \ + {__VA_ARGS__}) +#define REGISTER_CLK(s, ...) _REGISTER(&bcm2835_register_clock, \ + s, \ &(struct bcm2835_clock_data) \ {__VA_ARGS__}) -#define REGISTER_GATE(...) _REGISTER(&bcm2835_register_gate, \ +#define REGISTER_GATE(s, ...) _REGISTER(&bcm2835_register_gate, \ + s, \ &(struct bcm2835_gate_data) \ {__VA_ARGS__}) @@ -1480,7 +1496,8 @@ static const char *const bcm2835_clock_osc_parents[] = { "testdebug1" }; -#define REGISTER_OSC_CLK(...) REGISTER_CLK( \ +#define REGISTER_OSC_CLK(s, ...) REGISTER_CLK( \ + s, \ .num_mux_parents = ARRAY_SIZE(bcm2835_clock_osc_parents), \ .parents = bcm2835_clock_osc_parents, \ __VA_ARGS__) @@ -1497,7 +1514,8 @@ static const char *const bcm2835_clock_per_parents[] = { "pllh_aux", }; -#define REGISTER_PER_CLK(...) REGISTER_CLK( \ +#define REGISTER_PER_CLK(s, ...) REGISTER_CLK( \ + s, \ .num_mux_parents = ARRAY_SIZE(bcm2835_clock_per_parents), \ .parents = bcm2835_clock_per_parents, \ __VA_ARGS__) @@ -1522,7 +1540,8 @@ static const char *const bcm2835_pcm_per_parents[] = { "-", }; -#define REGISTER_PCM_CLK(...) REGISTER_CLK( \ +#define REGISTER_PCM_CLK(s, ...) REGISTER_CLK( \ + s, \ .num_mux_parents = ARRAY_SIZE(bcm2835_pcm_per_parents), \ .parents = bcm2835_pcm_per_parents, \ __VA_ARGS__) @@ -1541,7 +1560,8 @@ static const char *const bcm2835_clock_vpu_parents[] = { "pllc_core2", }; -#define REGISTER_VPU_CLK(...) REGISTER_CLK( \ +#define REGISTER_VPU_CLK(s, ...) REGISTER_CLK( \ + s, \ .num_mux_parents = ARRAY_SIZE(bcm2835_clock_vpu_parents), \ .parents = bcm2835_clock_vpu_parents, \ __VA_ARGS__) @@ -1577,12 +1597,14 @@ static const char *const bcm2835_clock_dsi1_parents[] = { "dsi1_byte_inv", }; -#define REGISTER_DSI0_CLK(...) REGISTER_CLK( \ +#define REGISTER_DSI0_CLK(s, ...) REGISTER_CLK( \ + s, \ .num_mux_parents = ARRAY_SIZE(bcm2835_clock_dsi0_parents), \ .parents = bcm2835_clock_dsi0_parents, \ __VA_ARGS__) -#define REGISTER_DSI1_CLK(...) REGISTER_CLK( \ +#define REGISTER_DSI1_CLK(s, ...) REGISTER_CLK( \ + s, \ .num_mux_parents = ARRAY_SIZE(bcm2835_clock_dsi1_parents), \ .parents = bcm2835_clock_dsi1_parents, \ __VA_ARGS__) @@ -1602,6 +1624,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { * AUDIO domain is on. 
*/ [BCM2835_PLLA] = REGISTER_PLL( + SOC_ALL, .name = "plla", .cm_ctrl_reg = CM_PLLA, .a2w_ctrl_reg = A2W_PLLA_CTRL, @@ -1616,6 +1639,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .max_rate = 2400000000u, .max_fb_rate = BCM2835_MAX_FB_RATE), [BCM2835_PLLA_CORE] = REGISTER_PLL_DIV( + SOC_ALL, .name = "plla_core", .source_pll = "plla", .cm_reg = CM_PLLA, @@ -1625,6 +1649,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .fixed_divider = 1, .flags = CLK_SET_RATE_PARENT), [BCM2835_PLLA_PER] = REGISTER_PLL_DIV( + SOC_ALL, .name = "plla_per", .source_pll = "plla", .cm_reg = CM_PLLA, @@ -1634,6 +1659,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .fixed_divider = 1, .flags = CLK_SET_RATE_PARENT), [BCM2835_PLLA_DSI0] = REGISTER_PLL_DIV( + SOC_ALL, .name = "plla_dsi0", .source_pll = "plla", .cm_reg = CM_PLLA, @@ -1642,6 +1668,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .hold_mask = CM_PLLA_HOLDDSI0, .fixed_divider = 1), [BCM2835_PLLA_CCP2] = REGISTER_PLL_DIV( + SOC_ALL, .name = "plla_ccp2", .source_pll = "plla", .cm_reg = CM_PLLA, @@ -1663,6 +1690,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { * AUDIO domain is on. */ [BCM2835_PLLC] = REGISTER_PLL( + SOC_ALL, .name = "pllc", .cm_ctrl_reg = CM_PLLC, .a2w_ctrl_reg = A2W_PLLC_CTRL, @@ -1677,6 +1705,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .max_rate = 3000000000u, .max_fb_rate = BCM2835_MAX_FB_RATE), [BCM2835_PLLC_CORE0] = REGISTER_PLL_DIV( + SOC_ALL, .name = "pllc_core0", .source_pll = "pllc", .cm_reg = CM_PLLC, @@ -1686,6 +1715,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .fixed_divider = 1, .flags = CLK_SET_RATE_PARENT), [BCM2835_PLLC_CORE1] = REGISTER_PLL_DIV( + SOC_ALL, .name = "pllc_core1", .source_pll = "pllc", .cm_reg = CM_PLLC, @@ -1695,6 +1725,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .fixed_divider = 1, .flags = CLK_SET_RATE_PARENT), [BCM2835_PLLC_CORE2] = REGISTER_PLL_DIV( + SOC_ALL, .name = "pllc_core2", .source_pll = "pllc", .cm_reg = CM_PLLC, @@ -1704,6 +1735,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .fixed_divider = 1, .flags = CLK_SET_RATE_PARENT), [BCM2835_PLLC_PER] = REGISTER_PLL_DIV( + SOC_ALL, .name = "pllc_per", .source_pll = "pllc", .cm_reg = CM_PLLC, @@ -1720,6 +1752,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { * AUDIO domain is on. */ [BCM2835_PLLD] = REGISTER_PLL( + SOC_ALL, .name = "plld", .cm_ctrl_reg = CM_PLLD, .a2w_ctrl_reg = A2W_PLLD_CTRL, @@ -1734,6 +1767,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .max_rate = 2400000000u, .max_fb_rate = BCM2835_MAX_FB_RATE), [BCM2835_PLLD_CORE] = REGISTER_PLL_DIV( + SOC_ALL, .name = "plld_core", .source_pll = "plld", .cm_reg = CM_PLLD, @@ -1742,7 +1776,13 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .hold_mask = CM_PLLD_HOLDCORE, .fixed_divider = 1, .flags = CLK_SET_RATE_PARENT), + /* + * VPU firmware assumes that PLLD_PER isn't disabled by the ARM core. + * Otherwise this could cause firmware lookups. That's why we mark + * it as critical. 
+ */ [BCM2835_PLLD_PER] = REGISTER_PLL_DIV( + SOC_ALL, .name = "plld_per", .source_pll = "plld", .cm_reg = CM_PLLD, @@ -1750,8 +1790,9 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .load_mask = CM_PLLD_LOADPER, .hold_mask = CM_PLLD_HOLDPER, .fixed_divider = 1, - .flags = CLK_SET_RATE_PARENT), + .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT), [BCM2835_PLLD_DSI0] = REGISTER_PLL_DIV( + SOC_ALL, .name = "plld_dsi0", .source_pll = "plld", .cm_reg = CM_PLLD, @@ -1760,6 +1801,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .hold_mask = CM_PLLD_HOLDDSI0, .fixed_divider = 1), [BCM2835_PLLD_DSI1] = REGISTER_PLL_DIV( + SOC_ALL, .name = "plld_dsi1", .source_pll = "plld", .cm_reg = CM_PLLD, @@ -1775,6 +1817,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { * It is in the HDMI power domain. */ [BCM2835_PLLH] = REGISTER_PLL( + SOC_BCM2835, "pllh", .cm_ctrl_reg = CM_PLLH, .a2w_ctrl_reg = A2W_PLLH_CTRL, @@ -1789,6 +1832,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .max_rate = 3000000000u, .max_fb_rate = BCM2835_MAX_FB_RATE), [BCM2835_PLLH_RCAL] = REGISTER_PLL_DIV( + SOC_BCM2835, .name = "pllh_rcal", .source_pll = "pllh", .cm_reg = CM_PLLH, @@ -1798,6 +1842,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .fixed_divider = 10, .flags = CLK_SET_RATE_PARENT), [BCM2835_PLLH_AUX] = REGISTER_PLL_DIV( + SOC_BCM2835, .name = "pllh_aux", .source_pll = "pllh", .cm_reg = CM_PLLH, @@ -1807,6 +1852,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .fixed_divider = 1, .flags = CLK_SET_RATE_PARENT), [BCM2835_PLLH_PIX] = REGISTER_PLL_DIV( + SOC_BCM2835, .name = "pllh_pix", .source_pll = "pllh", .cm_reg = CM_PLLH, @@ -1822,6 +1868,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { /* One Time Programmable Memory clock. Maximum 10Mhz. */ [BCM2835_CLOCK_OTP] = REGISTER_OSC_CLK( + SOC_ALL, .name = "otp", .ctl_reg = CM_OTPCTL, .div_reg = CM_OTPDIV, @@ -1833,6 +1880,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { * bythe watchdog timer and the camera pulse generator. */ [BCM2835_CLOCK_TIMER] = REGISTER_OSC_CLK( + SOC_ALL, .name = "timer", .ctl_reg = CM_TIMERCTL, .div_reg = CM_TIMERDIV, @@ -1843,12 +1891,14 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { * Generally run at 2Mhz, max 5Mhz. */ [BCM2835_CLOCK_TSENS] = REGISTER_OSC_CLK( + SOC_ALL, .name = "tsens", .ctl_reg = CM_TSENSCTL, .div_reg = CM_TSENSDIV, .int_bits = 5, .frac_bits = 0), [BCM2835_CLOCK_TEC] = REGISTER_OSC_CLK( + SOC_ALL, .name = "tec", .ctl_reg = CM_TECCTL, .div_reg = CM_TECDIV, @@ -1857,6 +1907,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { /* clocks with vpu parent mux */ [BCM2835_CLOCK_H264] = REGISTER_VPU_CLK( + SOC_ALL, .name = "h264", .ctl_reg = CM_H264CTL, .div_reg = CM_H264DIV, @@ -1864,6 +1915,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .frac_bits = 8, .tcnt_mux = 1), [BCM2835_CLOCK_ISP] = REGISTER_VPU_CLK( + SOC_ALL, .name = "isp", .ctl_reg = CM_ISPCTL, .div_reg = CM_ISPDIV, @@ -1876,6 +1928,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { * in the SDRAM controller can't be used. 
*/ [BCM2835_CLOCK_SDRAM] = REGISTER_VPU_CLK( + SOC_ALL, .name = "sdram", .ctl_reg = CM_SDCCTL, .div_reg = CM_SDCDIV, @@ -1883,6 +1936,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .frac_bits = 0, .tcnt_mux = 3), [BCM2835_CLOCK_V3D] = REGISTER_VPU_CLK( + SOC_ALL, .name = "v3d", .ctl_reg = CM_V3DCTL, .div_reg = CM_V3DDIV, @@ -1896,6 +1950,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { * in various hardware documentation. */ [BCM2835_CLOCK_VPU] = REGISTER_VPU_CLK( + SOC_ALL, .name = "vpu", .ctl_reg = CM_VPUCTL, .div_reg = CM_VPUDIV, @@ -1907,6 +1962,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { /* clocks with per parent mux */ [BCM2835_CLOCK_AVEO] = REGISTER_PER_CLK( + SOC_ALL, .name = "aveo", .ctl_reg = CM_AVEOCTL, .div_reg = CM_AVEODIV, @@ -1914,6 +1970,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .frac_bits = 0, .tcnt_mux = 38), [BCM2835_CLOCK_CAM0] = REGISTER_PER_CLK( + SOC_ALL, .name = "cam0", .ctl_reg = CM_CAM0CTL, .div_reg = CM_CAM0DIV, @@ -1921,6 +1978,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .frac_bits = 8, .tcnt_mux = 14), [BCM2835_CLOCK_CAM1] = REGISTER_PER_CLK( + SOC_ALL, .name = "cam1", .ctl_reg = CM_CAM1CTL, .div_reg = CM_CAM1DIV, @@ -1928,12 +1986,14 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .frac_bits = 8, .tcnt_mux = 15), [BCM2835_CLOCK_DFT] = REGISTER_PER_CLK( + SOC_ALL, .name = "dft", .ctl_reg = CM_DFTCTL, .div_reg = CM_DFTDIV, .int_bits = 5, .frac_bits = 0), [BCM2835_CLOCK_DPI] = REGISTER_PER_CLK( + SOC_ALL, .name = "dpi", .ctl_reg = CM_DPICTL, .div_reg = CM_DPIDIV, @@ -1943,6 +2003,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { /* Arasan EMMC clock */ [BCM2835_CLOCK_EMMC] = REGISTER_PER_CLK( + SOC_ALL, .name = "emmc", .ctl_reg = CM_EMMCCTL, .div_reg = CM_EMMCDIV, @@ -1950,8 +2011,19 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .frac_bits = 8, .tcnt_mux = 39), + /* EMMC2 clock (only available for BCM2711) */ + [BCM2711_CLOCK_EMMC2] = REGISTER_PER_CLK( + SOC_BCM2711, + .name = "emmc2", + .ctl_reg = CM_EMMC2CTL, + .div_reg = CM_EMMC2DIV, + .int_bits = 4, + .frac_bits = 8, + .tcnt_mux = 42), + /* General purpose (GPIO) clocks */ [BCM2835_CLOCK_GP0] = REGISTER_PER_CLK( + SOC_ALL, .name = "gp0", .ctl_reg = CM_GP0CTL, .div_reg = CM_GP0DIV, @@ -1960,6 +2032,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .is_mash_clock = true, .tcnt_mux = 20), [BCM2835_CLOCK_GP1] = REGISTER_PER_CLK( + SOC_ALL, .name = "gp1", .ctl_reg = CM_GP1CTL, .div_reg = CM_GP1DIV, @@ -1969,6 +2042,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .is_mash_clock = true, .tcnt_mux = 21), [BCM2835_CLOCK_GP2] = REGISTER_PER_CLK( + SOC_ALL, .name = "gp2", .ctl_reg = CM_GP2CTL, .div_reg = CM_GP2DIV, @@ -1978,6 +2052,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { /* HDMI state machine */ [BCM2835_CLOCK_HSM] = REGISTER_PER_CLK( + SOC_ALL, .name = "hsm", .ctl_reg = CM_HSMCTL, .div_reg = CM_HSMDIV, @@ -1985,6 +2060,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .frac_bits = 8, .tcnt_mux = 22), [BCM2835_CLOCK_PCM] = REGISTER_PCM_CLK( + SOC_ALL, .name = "pcm", .ctl_reg = CM_PCMCTL, .div_reg = CM_PCMDIV, @@ -1994,6 +2070,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .low_jitter = true, .tcnt_mux = 23), [BCM2835_CLOCK_PWM] = REGISTER_PER_CLK( + SOC_ALL, .name = "pwm", .ctl_reg = CM_PWMCTL, .div_reg = CM_PWMDIV, @@ -2002,6 +2079,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .is_mash_clock = 
true, .tcnt_mux = 24), [BCM2835_CLOCK_SLIM] = REGISTER_PER_CLK( + SOC_ALL, .name = "slim", .ctl_reg = CM_SLIMCTL, .div_reg = CM_SLIMDIV, @@ -2010,6 +2088,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .is_mash_clock = true, .tcnt_mux = 25), [BCM2835_CLOCK_SMI] = REGISTER_PER_CLK( + SOC_ALL, .name = "smi", .ctl_reg = CM_SMICTL, .div_reg = CM_SMIDIV, @@ -2017,6 +2096,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .frac_bits = 8, .tcnt_mux = 27), [BCM2835_CLOCK_UART] = REGISTER_PER_CLK( + SOC_ALL, .name = "uart", .ctl_reg = CM_UARTCTL, .div_reg = CM_UARTDIV, @@ -2026,6 +2106,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { /* TV encoder clock. Only operating frequency is 108Mhz. */ [BCM2835_CLOCK_VEC] = REGISTER_PER_CLK( + SOC_ALL, .name = "vec", .ctl_reg = CM_VECCTL, .div_reg = CM_VECDIV, @@ -2040,6 +2121,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { /* dsi clocks */ [BCM2835_CLOCK_DSI0E] = REGISTER_PER_CLK( + SOC_ALL, .name = "dsi0e", .ctl_reg = CM_DSI0ECTL, .div_reg = CM_DSI0EDIV, @@ -2047,6 +2129,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .frac_bits = 8, .tcnt_mux = 18), [BCM2835_CLOCK_DSI1E] = REGISTER_PER_CLK( + SOC_ALL, .name = "dsi1e", .ctl_reg = CM_DSI1ECTL, .div_reg = CM_DSI1EDIV, @@ -2054,6 +2137,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .frac_bits = 8, .tcnt_mux = 19), [BCM2835_CLOCK_DSI0P] = REGISTER_DSI0_CLK( + SOC_ALL, .name = "dsi0p", .ctl_reg = CM_DSI0PCTL, .div_reg = CM_DSI0PDIV, @@ -2061,6 +2145,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .frac_bits = 0, .tcnt_mux = 12), [BCM2835_CLOCK_DSI1P] = REGISTER_DSI1_CLK( + SOC_ALL, .name = "dsi1p", .ctl_reg = CM_DSI1PCTL, .div_reg = CM_DSI1PDIV, @@ -2077,6 +2162,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { * non-stop vpu clock. 
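Each bcm2835 clock descriptor above now carries a supported SoC bitmask (SOC_BCM2835, SOC_BCM2711, or SOC_ALL for both), and the probe changes further below only register descriptors whose mask matches the platform data selected by the compatible string. The same filter, restated as a standalone helper purely for illustration (the helper itself is not part of this patch; it reuses the struct and field names the patch introduces):

static bool cprman_clk_supported(const struct bcm2835_clk_desc *desc,
				 const struct cprman_plat_data *pdata)
{
	/*
	 * Mirrors the check in bcm2835_clk_probe(): skip empty descriptor
	 * slots and clocks not present on the SoC being probed.
	 */
	return desc->clk_register && desc->data &&
	       (desc->supported & pdata->soc);
}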
*/ [BCM2835_CLOCK_PERI_IMAGE] = REGISTER_GATE( + SOC_ALL, .name = "peri_image", .parent = "vpu", .ctl_reg = CM_PERIICTL), @@ -2109,9 +2195,14 @@ static int bcm2835_clk_probe(struct platform_device *pdev) struct resource *res; const struct bcm2835_clk_desc *desc; const size_t asize = ARRAY_SIZE(clk_desc_array); + const struct cprman_plat_data *pdata; size_t i; int ret; + pdata = of_device_get_match_data(&pdev->dev); + if (!pdata) + return -ENODEV; + cprman = devm_kzalloc(dev, struct_size(cprman, onecell.hws, asize), GFP_KERNEL); @@ -2147,8 +2238,10 @@ static int bcm2835_clk_probe(struct platform_device *pdev) for (i = 0; i < asize; i++) { desc = &clk_desc_array[i]; - if (desc->clk_register && desc->data) + if (desc->clk_register && desc->data && + (desc->supported & pdata->soc)) { hws[i] = desc->clk_register(cprman, desc->data); + } } ret = bcm2835_mark_sdc_parent_critical(hws[BCM2835_CLOCK_SDRAM]->clk); @@ -2159,8 +2252,17 @@ static int bcm2835_clk_probe(struct platform_device *pdev) &cprman->onecell); } +static const struct cprman_plat_data cprman_bcm2835_plat_data = { + .soc = SOC_BCM2835, +}; + +static const struct cprman_plat_data cprman_bcm2711_plat_data = { + .soc = SOC_BCM2711, +}; + static const struct of_device_id bcm2835_clk_of_match[] = { - { .compatible = "brcm,bcm2835-cprman", }, + { .compatible = "brcm,bcm2835-cprman", .data = &cprman_bcm2835_plat_data }, + { .compatible = "brcm,bcm2711-cprman", .data = &cprman_bcm2711_plat_data }, {} }; MODULE_DEVICE_TABLE(of, bcm2835_clk_of_match); diff --git a/drivers/clk/bcm/clk-bcm63xx-gate.c b/drivers/clk/bcm/clk-bcm63xx-gate.c index 9e1dcd43258c60..98e884957db871 100644 --- a/drivers/clk/bcm/clk-bcm63xx-gate.c +++ b/drivers/clk/bcm/clk-bcm63xx-gate.c @@ -146,7 +146,6 @@ static int clk_bcm63xx_probe(struct platform_device *pdev) { const struct clk_bcm63xx_table_entry *entry, *table; struct clk_bcm63xx_hw *hw; - struct resource *r; u8 maxbit = 0; int i, ret; @@ -170,8 +169,7 @@ static int clk_bcm63xx_probe(struct platform_device *pdev) for (i = 0; i < maxbit; i++) hw->data.hws[i] = ERR_PTR(-ENODEV); - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hw->regs = devm_ioremap_resource(&pdev->dev, r); + hw->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(hw->regs)) return PTR_ERR(hw->regs); diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c index 42b4df6ba249f2..abf06fb6453e31 100644 --- a/drivers/clk/clk-aspeed.c +++ b/drivers/clk/clk-aspeed.c @@ -1,19 +1,19 @@ // SPDX-License-Identifier: GPL-2.0+ +// Copyright IBM Corp #define pr_fmt(fmt) "clk-aspeed: " fmt -#include #include #include #include #include #include -#include #include -#include #include +#include "clk-aspeed.h" + #define ASPEED_NUM_CLKS 36 #define ASPEED_RESET2_OFFSET 32 @@ -42,48 +42,6 @@ static struct clk_hw_onecell_data *aspeed_clk_data; static void __iomem *scu_base; -/** - * struct aspeed_gate_data - Aspeed gated clocks - * @clock_idx: bit used to gate this clock in the clock register - * @reset_idx: bit used to reset this IP in the reset register. 
-1 if no - * reset is required when enabling the clock - * @name: the clock name - * @parent_name: the name of the parent clock - * @flags: standard clock framework flags - */ -struct aspeed_gate_data { - u8 clock_idx; - s8 reset_idx; - const char *name; - const char *parent_name; - unsigned long flags; -}; - -/** - * struct aspeed_clk_gate - Aspeed specific clk_gate structure - * @hw: handle between common and hardware-specific interfaces - * @reg: register controlling gate - * @clock_idx: bit used to gate this clock in the clock register - * @reset_idx: bit used to reset this IP in the reset register. -1 if no - * reset is required when enabling the clock - * @flags: hardware-specific flags - * @lock: register lock - * - * Some of the clocks in the Aspeed SoC must be put in reset before enabling. - * This modified version of clk_gate allows an optional reset bit to be - * specified. - */ -struct aspeed_clk_gate { - struct clk_hw hw; - struct regmap *map; - u8 clock_idx; - s8 reset_idx; - u8 flags; - spinlock_t *lock; -}; - -#define to_aspeed_clk_gate(_hw) container_of(_hw, struct aspeed_clk_gate, hw) - /* TODO: ask Aspeed about the actual parent data */ static const struct aspeed_gate_data aspeed_gates[] = { /* clk rst name parent flags */ @@ -208,13 +166,6 @@ static struct clk_hw *aspeed_ast2500_calc_pll(const char *name, u32 val) mult, div); } -struct aspeed_clk_soc_data { - const struct clk_div_table *div_table; - const struct clk_div_table *eclk_div_table; - const struct clk_div_table *mac_div_table; - struct clk_hw *(*calc_pll)(const char *name, u32 val); -}; - static const struct aspeed_clk_soc_data ast2500_data = { .div_table = ast2500_div_table, .eclk_div_table = ast2500_eclk_div_table, @@ -315,18 +266,6 @@ static const struct clk_ops aspeed_clk_gate_ops = { .is_enabled = aspeed_clk_is_enabled, }; -/** - * struct aspeed_reset - Aspeed reset controller - * @map: regmap to access the containing system controller - * @rcdev: reset controller device - */ -struct aspeed_reset { - struct regmap *map; - struct reset_controller_dev rcdev; -}; - -#define to_aspeed_reset(p) container_of((p), struct aspeed_reset, rcdev) - static const u8 aspeed_resets[] = { /* SCU04 resets */ [ASPEED_RESET_XDMA] = 25, @@ -500,9 +439,14 @@ static int aspeed_clk_probe(struct platform_device *pdev) return PTR_ERR(hw); aspeed_clk_data->hws[ASPEED_CLK_MPLL] = hw; - /* SD/SDIO clock divider (TODO: There's a gate too) */ - hw = clk_hw_register_divider_table(dev, "sdio", "hpll", 0, - scu_base + ASPEED_CLK_SELECTION, 12, 3, 0, + /* SD/SDIO clock divider and gate */ + hw = clk_hw_register_gate(dev, "sd_extclk_gate", "hpll", 0, + scu_base + ASPEED_CLK_SELECTION, 15, 0, + &aspeed_clk_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + hw = clk_hw_register_divider_table(dev, "sd_extclk", "sd_extclk_gate", + 0, scu_base + ASPEED_CLK_SELECTION, 12, 3, 0, soc_data->div_table, &aspeed_clk_lock); if (IS_ERR(hw)) diff --git a/drivers/clk/clk-aspeed.h b/drivers/clk/clk-aspeed.h new file mode 100644 index 00000000000000..5296b15b1c88e1 --- /dev/null +++ b/drivers/clk/clk-aspeed.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Structures used by ASPEED clock drivers + * + * Copyright 2019 IBM Corp. + */ + +#include +#include +#include +#include + +struct clk_div_table; +struct regmap; + +/** + * struct aspeed_gate_data - Aspeed gated clocks + * @clock_idx: bit used to gate this clock in the clock register + * @reset_idx: bit used to reset this IP in the reset register. 
-1 if no + * reset is required when enabling the clock + * @name: the clock name + * @parent_name: the name of the parent clock + * @flags: standard clock framework flags + */ +struct aspeed_gate_data { + u8 clock_idx; + s8 reset_idx; + const char *name; + const char *parent_name; + unsigned long flags; +}; + +/** + * struct aspeed_clk_gate - Aspeed specific clk_gate structure + * @hw: handle between common and hardware-specific interfaces + * @reg: register controlling gate + * @clock_idx: bit used to gate this clock in the clock register + * @reset_idx: bit used to reset this IP in the reset register. -1 if no + * reset is required when enabling the clock + * @flags: hardware-specific flags + * @lock: register lock + * + * Some of the clocks in the Aspeed SoC must be put in reset before enabling. + * This modified version of clk_gate allows an optional reset bit to be + * specified. + */ +struct aspeed_clk_gate { + struct clk_hw hw; + struct regmap *map; + u8 clock_idx; + s8 reset_idx; + u8 flags; + spinlock_t *lock; +}; + +#define to_aspeed_clk_gate(_hw) container_of(_hw, struct aspeed_clk_gate, hw) + +/** + * struct aspeed_reset - Aspeed reset controller + * @map: regmap to access the containing system controller + * @rcdev: reset controller device + */ +struct aspeed_reset { + struct regmap *map; + struct reset_controller_dev rcdev; +}; + +#define to_aspeed_reset(p) container_of((p), struct aspeed_reset, rcdev) + +/** + * struct aspeed_clk_soc_data - Aspeed SoC specific divisor information + * @div_table: Common divider lookup table + * @eclk_div_table: Divider lookup table for ECLK + * @mac_div_table: Divider lookup table for MAC (Ethernet) clocks + * @calc_pll: Callback to maculate common PLL settings + */ +struct aspeed_clk_soc_data { + const struct clk_div_table *div_table; + const struct clk_div_table *eclk_div_table; + const struct clk_div_table *mac_div_table; + struct clk_hw *(*calc_pll)(const char *name, u32 val); +}; diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c new file mode 100644 index 00000000000000..1c1bb39bb04e67 --- /dev/null +++ b/drivers/clk/clk-ast2600.c @@ -0,0 +1,704 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// Copyright IBM Corp +// Copyright ASPEED Technology + +#define pr_fmt(fmt) "clk-ast2600: " fmt + +#include +#include +#include +#include +#include +#include + +#include + +#include "clk-aspeed.h" + +#define ASPEED_G6_NUM_CLKS 67 + +#define ASPEED_G6_SILICON_REV 0x004 + +#define ASPEED_G6_RESET_CTRL 0x040 +#define ASPEED_G6_RESET_CTRL2 0x050 + +#define ASPEED_G6_CLK_STOP_CTRL 0x080 +#define ASPEED_G6_CLK_STOP_CTRL2 0x090 + +#define ASPEED_G6_MISC_CTRL 0x0C0 +#define UART_DIV13_EN BIT(12) + +#define ASPEED_G6_CLK_SELECTION1 0x300 +#define ASPEED_G6_CLK_SELECTION2 0x304 +#define ASPEED_G6_CLK_SELECTION4 0x310 + +#define ASPEED_HPLL_PARAM 0x200 +#define ASPEED_APLL_PARAM 0x210 +#define ASPEED_MPLL_PARAM 0x220 +#define ASPEED_EPLL_PARAM 0x240 +#define ASPEED_DPLL_PARAM 0x260 + +#define ASPEED_G6_STRAP1 0x500 + +/* Globally visible clocks */ +static DEFINE_SPINLOCK(aspeed_g6_clk_lock); + +/* Keeps track of all clocks */ +static struct clk_hw_onecell_data *aspeed_g6_clk_data; + +static void __iomem *scu_g6_base; + +/* + * Clocks marked with CLK_IS_CRITICAL: + * + * ref0 and ref1 are essential for the SoC to operate + * mpll is required if SDRAM is used + */ +static const struct aspeed_gate_data aspeed_g6_gates[] = { + /* clk rst name parent flags */ + [ASPEED_CLK_GATE_MCLK] = { 0, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */ 
+ [ASPEED_CLK_GATE_ECLK] = { 1, -1, "eclk-gate", "eclk", 0 }, /* Video Engine */ + [ASPEED_CLK_GATE_GCLK] = { 2, 7, "gclk-gate", NULL, 0 }, /* 2D engine */ + /* vclk parent - dclk/d1clk/hclk/mclk */ + [ASPEED_CLK_GATE_VCLK] = { 3, 6, "vclk-gate", NULL, 0 }, /* Video Capture */ + [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", 0 }, /* PCIe/PCI */ + /* From dpll */ + [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, CLK_IS_CRITICAL }, /* DAC */ + [ASPEED_CLK_GATE_REF0CLK] = { 6, -1, "ref0clk-gate", "clkin", CLK_IS_CRITICAL }, + [ASPEED_CLK_GATE_USBPORT2CLK] = { 7, 3, "usb-port2-gate", NULL, 0 }, /* USB2.0 Host port 2 */ + /* Reserved 8 */ + [ASPEED_CLK_GATE_USBUHCICLK] = { 9, 15, "usb-uhci-gate", NULL, 0 }, /* USB1.1 (requires port 2 enabled) */ + /* From dpll/epll/40mhz usb p1 phy/gpioc6/dp phy pll */ + [ASPEED_CLK_GATE_D1CLK] = { 10, 13, "d1clk-gate", "d1clk", 0 }, /* GFX CRT */ + /* Reserved 11/12 */ + [ASPEED_CLK_GATE_YCLK] = { 13, 4, "yclk-gate", NULL, 0 }, /* HAC */ + [ASPEED_CLK_GATE_USBPORT1CLK] = { 14, 14, "usb-port1-gate", NULL, 0 }, /* USB2 hub/USB2 host port 1/USB1.1 dev */ + [ASPEED_CLK_GATE_UART5CLK] = { 15, -1, "uart5clk-gate", "uart", 0 }, /* UART5 */ + /* Reserved 16/19 */ + [ASPEED_CLK_GATE_MAC1CLK] = { 20, 11, "mac1clk-gate", "mac12", 0 }, /* MAC1 */ + [ASPEED_CLK_GATE_MAC2CLK] = { 21, 12, "mac2clk-gate", "mac12", 0 }, /* MAC2 */ + /* Reserved 22/23 */ + [ASPEED_CLK_GATE_RSACLK] = { 24, 4, "rsaclk-gate", NULL, 0 }, /* HAC */ + [ASPEED_CLK_GATE_RVASCLK] = { 25, 9, "rvasclk-gate", NULL, 0 }, /* RVAS */ + /* Reserved 26 */ + [ASPEED_CLK_GATE_EMMCCLK] = { 27, 16, "emmcclk-gate", NULL, 0 }, /* For card clk */ + /* Reserved 28/29/30 */ + [ASPEED_CLK_GATE_LCLK] = { 32, 32, "lclk-gate", NULL, 0 }, /* LPC */ + [ASPEED_CLK_GATE_ESPICLK] = { 33, -1, "espiclk-gate", NULL, 0 }, /* eSPI */ + [ASPEED_CLK_GATE_REF1CLK] = { 34, -1, "ref1clk-gate", "clkin", CLK_IS_CRITICAL }, + /* Reserved 35 */ + [ASPEED_CLK_GATE_SDCLK] = { 36, 56, "sdclk-gate", NULL, 0 }, /* SDIO/SD */ + [ASPEED_CLK_GATE_LHCCLK] = { 37, -1, "lhclk-gate", "lhclk", 0 }, /* LPC master/LPC+ */ + /* Reserved 38 RSA: no longer used */ + /* Reserved 39 */ + [ASPEED_CLK_GATE_I3C0CLK] = { 40, 40, "i3c0clk-gate", NULL, 0 }, /* I3C0 */ + [ASPEED_CLK_GATE_I3C1CLK] = { 41, 41, "i3c1clk-gate", NULL, 0 }, /* I3C1 */ + [ASPEED_CLK_GATE_I3C2CLK] = { 42, 42, "i3c2clk-gate", NULL, 0 }, /* I3C2 */ + [ASPEED_CLK_GATE_I3C3CLK] = { 43, 43, "i3c3clk-gate", NULL, 0 }, /* I3C3 */ + [ASPEED_CLK_GATE_I3C4CLK] = { 44, 44, "i3c4clk-gate", NULL, 0 }, /* I3C4 */ + [ASPEED_CLK_GATE_I3C5CLK] = { 45, 45, "i3c5clk-gate", NULL, 0 }, /* I3C5 */ + [ASPEED_CLK_GATE_I3C6CLK] = { 46, 46, "i3c6clk-gate", NULL, 0 }, /* I3C6 */ + [ASPEED_CLK_GATE_I3C7CLK] = { 47, 47, "i3c7clk-gate", NULL, 0 }, /* I3C7 */ + [ASPEED_CLK_GATE_UART1CLK] = { 48, -1, "uart1clk-gate", "uart", 0 }, /* UART1 */ + [ASPEED_CLK_GATE_UART2CLK] = { 49, -1, "uart2clk-gate", "uart", 0 }, /* UART2 */ + [ASPEED_CLK_GATE_UART3CLK] = { 50, -1, "uart3clk-gate", "uart", 0 }, /* UART3 */ + [ASPEED_CLK_GATE_UART4CLK] = { 51, -1, "uart4clk-gate", "uart", 0 }, /* UART4 */ + [ASPEED_CLK_GATE_MAC3CLK] = { 52, 52, "mac3clk-gate", "mac34", 0 }, /* MAC3 */ + [ASPEED_CLK_GATE_MAC4CLK] = { 53, 53, "mac4clk-gate", "mac34", 0 }, /* MAC4 */ + [ASPEED_CLK_GATE_UART6CLK] = { 54, -1, "uart6clk-gate", "uartx", 0 }, /* UART6 */ + [ASPEED_CLK_GATE_UART7CLK] = { 55, -1, "uart7clk-gate", "uartx", 0 }, /* UART7 */ + [ASPEED_CLK_GATE_UART8CLK] = { 56, -1, "uart8clk-gate", "uartx", 0 }, /* UART8 */ + 
[ASPEED_CLK_GATE_UART9CLK] = { 57, -1, "uart9clk-gate", "uartx", 0 }, /* UART9 */ + [ASPEED_CLK_GATE_UART10CLK] = { 58, -1, "uart10clk-gate", "uartx", 0 }, /* UART10 */ + [ASPEED_CLK_GATE_UART11CLK] = { 59, -1, "uart11clk-gate", "uartx", 0 }, /* UART11 */ + [ASPEED_CLK_GATE_UART12CLK] = { 60, -1, "uart12clk-gate", "uartx", 0 }, /* UART12 */ + [ASPEED_CLK_GATE_UART13CLK] = { 61, -1, "uart13clk-gate", "uartx", 0 }, /* UART13 */ + [ASPEED_CLK_GATE_FSICLK] = { 62, 59, "fsiclk-gate", NULL, 0 }, /* FSI */ +}; + +static const char * const eclk_parent_names[] = { "mpll", "hpll", "dpll" }; + +static const struct clk_div_table ast2600_eclk_div_table[] = { + { 0x0, 2 }, + { 0x1, 2 }, + { 0x2, 3 }, + { 0x3, 4 }, + { 0x4, 5 }, + { 0x5, 6 }, + { 0x6, 7 }, + { 0x7, 8 }, + { 0 } +}; + +static const struct clk_div_table ast2600_mac_div_table[] = { + { 0x0, 4 }, + { 0x1, 4 }, + { 0x2, 6 }, + { 0x3, 8 }, + { 0x4, 10 }, + { 0x5, 12 }, + { 0x6, 14 }, + { 0x7, 16 }, + { 0 } +}; + +static const struct clk_div_table ast2600_div_table[] = { + { 0x0, 4 }, + { 0x1, 8 }, + { 0x2, 12 }, + { 0x3, 16 }, + { 0x4, 20 }, + { 0x5, 24 }, + { 0x6, 28 }, + { 0x7, 32 }, + { 0 } +}; + +/* For hpll/dpll/epll/mpll */ +static struct clk_hw *ast2600_calc_pll(const char *name, u32 val) +{ + unsigned int mult, div; + + if (val & BIT(24)) { + /* Pass through mode */ + mult = div = 1; + } else { + /* F = 25Mhz * [(M + 2) / (n + 1)] / (p + 1) */ + u32 m = val & 0x1fff; + u32 n = (val >> 13) & 0x3f; + u32 p = (val >> 19) & 0xf; + mult = (m + 1) / (n + 1); + div = (p + 1); + } + return clk_hw_register_fixed_factor(NULL, name, "clkin", 0, + mult, div); +}; + +static struct clk_hw *ast2600_calc_apll(const char *name, u32 val) +{ + unsigned int mult, div; + + if (val & BIT(20)) { + /* Pass through mode */ + mult = div = 1; + } else { + /* F = 25Mhz * (2-od) * [(m + 2) / (n + 1)] */ + u32 m = (val >> 5) & 0x3f; + u32 od = (val >> 4) & 0x1; + u32 n = val & 0xf; + + mult = (2 - od) * (m + 2); + div = n + 1; + } + return clk_hw_register_fixed_factor(NULL, name, "clkin", 0, + mult, div); +}; + +static u32 get_bit(u8 idx) +{ + return BIT(idx % 32); +} + +static u32 get_reset_reg(struct aspeed_clk_gate *gate) +{ + if (gate->reset_idx < 32) + return ASPEED_G6_RESET_CTRL; + + return ASPEED_G6_RESET_CTRL2; +} + +static u32 get_clock_reg(struct aspeed_clk_gate *gate) +{ + if (gate->clock_idx < 32) + return ASPEED_G6_CLK_STOP_CTRL; + + return ASPEED_G6_CLK_STOP_CTRL2; +} + +static int aspeed_g6_clk_is_enabled(struct clk_hw *hw) +{ + struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw); + u32 clk = get_bit(gate->clock_idx); + u32 rst = get_bit(gate->reset_idx); + u32 reg; + u32 enval; + + /* + * If the IP is in reset, treat the clock as not enabled, + * this happens with some clocks such as the USB one when + * coming from cold reset. Without this, aspeed_clk_enable() + * will fail to lift the reset. + */ + if (gate->reset_idx >= 0) { + regmap_read(gate->map, get_reset_reg(gate), ®); + + if (reg & rst) + return 0; + } + + regmap_read(gate->map, get_clock_reg(gate), ®); + + enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk; + + return ((reg & clk) == enval) ? 
1 : 0; +} + +static int aspeed_g6_clk_enable(struct clk_hw *hw) +{ + struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw); + unsigned long flags; + u32 clk = get_bit(gate->clock_idx); + u32 rst = get_bit(gate->reset_idx); + + spin_lock_irqsave(gate->lock, flags); + + if (aspeed_g6_clk_is_enabled(hw)) { + spin_unlock_irqrestore(gate->lock, flags); + return 0; + } + + if (gate->reset_idx >= 0) { + /* Put IP in reset */ + regmap_write(gate->map, get_reset_reg(gate), rst); + /* Delay 100us */ + udelay(100); + } + + /* Enable clock */ + if (gate->flags & CLK_GATE_SET_TO_DISABLE) { + regmap_write(gate->map, get_clock_reg(gate), clk); + } else { + /* Use set to clear register */ + regmap_write(gate->map, get_clock_reg(gate) + 0x04, clk); + } + + if (gate->reset_idx >= 0) { + /* A delay of 10ms is specified by the ASPEED docs */ + mdelay(10); + /* Take IP out of reset */ + regmap_write(gate->map, get_reset_reg(gate) + 0x4, rst); + } + + spin_unlock_irqrestore(gate->lock, flags); + + return 0; +} + +static void aspeed_g6_clk_disable(struct clk_hw *hw) +{ + struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw); + unsigned long flags; + u32 clk = get_bit(gate->clock_idx); + + spin_lock_irqsave(gate->lock, flags); + + if (gate->flags & CLK_GATE_SET_TO_DISABLE) { + regmap_write(gate->map, get_clock_reg(gate), clk); + } else { + /* Use set to clear register */ + regmap_write(gate->map, get_clock_reg(gate) + 0x4, clk); + } + + spin_unlock_irqrestore(gate->lock, flags); +} + +static const struct clk_ops aspeed_g6_clk_gate_ops = { + .enable = aspeed_g6_clk_enable, + .disable = aspeed_g6_clk_disable, + .is_enabled = aspeed_g6_clk_is_enabled, +}; + +static int aspeed_g6_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct aspeed_reset *ar = to_aspeed_reset(rcdev); + u32 rst = get_bit(id); + u32 reg = id >= 32 ? ASPEED_G6_RESET_CTRL2 : ASPEED_G6_RESET_CTRL; + + /* Use set to clear register */ + return regmap_write(ar->map, reg + 0x04, rst); +} + +static int aspeed_g6_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct aspeed_reset *ar = to_aspeed_reset(rcdev); + u32 rst = get_bit(id); + u32 reg = id >= 32 ? ASPEED_G6_RESET_CTRL2 : ASPEED_G6_RESET_CTRL; + + return regmap_write(ar->map, reg, rst); +} + +static int aspeed_g6_reset_status(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct aspeed_reset *ar = to_aspeed_reset(rcdev); + int ret; + u32 val; + u32 rst = get_bit(id); + u32 reg = id >= 32 ? ASPEED_G6_RESET_CTRL2 : ASPEED_G6_RESET_CTRL; + + ret = regmap_read(ar->map, reg, &val); + if (ret) + return ret; + + return !!(val & rst); +} + +static const struct reset_control_ops aspeed_g6_reset_ops = { + .assert = aspeed_g6_reset_assert, + .deassert = aspeed_g6_reset_deassert, + .status = aspeed_g6_reset_status, +}; + +static struct clk_hw *aspeed_g6_clk_hw_register_gate(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + struct regmap *map, u8 clock_idx, u8 reset_idx, + u8 clk_gate_flags, spinlock_t *lock) +{ + struct aspeed_clk_gate *gate; + struct clk_init_data init; + struct clk_hw *hw; + int ret; + + gate = kzalloc(sizeof(*gate), GFP_KERNEL); + if (!gate) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &aspeed_g6_clk_gate_ops; + init.flags = flags; + init.parent_names = parent_name ? &parent_name : NULL; + init.num_parents = parent_name ? 
1 : 0; + + gate->map = map; + gate->clock_idx = clock_idx; + gate->reset_idx = reset_idx; + gate->flags = clk_gate_flags; + gate->lock = lock; + gate->hw.init = &init; + + hw = &gate->hw; + ret = clk_hw_register(dev, hw); + if (ret) { + kfree(gate); + hw = ERR_PTR(ret); + } + + return hw; +} + +static const char * const vclk_parent_names[] = { + "dpll", + "d1pll", + "hclk", + "mclk", +}; + +static const char * const d1clk_parent_names[] = { + "dpll", + "epll", + "usb-phy-40m", + "gpioc6_clkin", + "dp_phy_pll", +}; + +static int aspeed_g6_clk_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct aspeed_reset *ar; + struct regmap *map; + struct clk_hw *hw; + u32 val, rate; + int i, ret; + + map = syscon_node_to_regmap(dev->of_node); + if (IS_ERR(map)) { + dev_err(dev, "no syscon regmap\n"); + return PTR_ERR(map); + } + + ar = devm_kzalloc(dev, sizeof(*ar), GFP_KERNEL); + if (!ar) + return -ENOMEM; + + ar->map = map; + + ar->rcdev.owner = THIS_MODULE; + ar->rcdev.nr_resets = 64; + ar->rcdev.ops = &aspeed_g6_reset_ops; + ar->rcdev.of_node = dev->of_node; + + ret = devm_reset_controller_register(dev, &ar->rcdev); + if (ret) { + dev_err(dev, "could not register reset controller\n"); + return ret; + } + + /* UART clock div13 setting */ + regmap_read(map, ASPEED_G6_MISC_CTRL, &val); + if (val & UART_DIV13_EN) + rate = 24000000 / 13; + else + rate = 24000000; + hw = clk_hw_register_fixed_rate(dev, "uart", NULL, 0, rate); + if (IS_ERR(hw)) + return PTR_ERR(hw); + aspeed_g6_clk_data->hws[ASPEED_CLK_UART] = hw; + + /* UART6~13 clock div13 setting */ + regmap_read(map, 0x80, &val); + if (val & BIT(31)) + rate = 24000000 / 13; + else + rate = 24000000; + hw = clk_hw_register_fixed_rate(dev, "uartx", NULL, 0, rate); + if (IS_ERR(hw)) + return PTR_ERR(hw); + aspeed_g6_clk_data->hws[ASPEED_CLK_UARTX] = hw; + + /* EMMC ext clock divider */ + hw = clk_hw_register_gate(dev, "emmc_extclk_gate", "hpll", 0, + scu_g6_base + ASPEED_G6_CLK_SELECTION1, 15, 0, + &aspeed_g6_clk_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + hw = clk_hw_register_divider_table(dev, "emmc_extclk", "emmc_extclk_gate", 0, + scu_g6_base + ASPEED_G6_CLK_SELECTION1, 12, 3, 0, + ast2600_div_table, + &aspeed_g6_clk_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + aspeed_g6_clk_data->hws[ASPEED_CLK_EMMC] = hw; + + /* SD/SDIO clock divider and gate */ + hw = clk_hw_register_gate(dev, "sd_extclk_gate", "hpll", 0, + scu_g6_base + ASPEED_G6_CLK_SELECTION4, 31, 0, + &aspeed_g6_clk_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + hw = clk_hw_register_divider_table(dev, "sd_extclk", "sd_extclk_gate", + 0, scu_g6_base + ASPEED_G6_CLK_SELECTION4, 28, 3, 0, + ast2600_div_table, + &aspeed_g6_clk_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + aspeed_g6_clk_data->hws[ASPEED_CLK_SDIO] = hw; + + /* MAC1/2 AHB bus clock divider */ + hw = clk_hw_register_divider_table(dev, "mac12", "hpll", 0, + scu_g6_base + ASPEED_G6_CLK_SELECTION1, 16, 3, 0, + ast2600_mac_div_table, + &aspeed_g6_clk_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + aspeed_g6_clk_data->hws[ASPEED_CLK_MAC12] = hw; + + /* MAC3/4 AHB bus clock divider */ + hw = clk_hw_register_divider_table(dev, "mac34", "hpll", 0, + scu_g6_base + 0x310, 24, 3, 0, + ast2600_mac_div_table, + &aspeed_g6_clk_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + aspeed_g6_clk_data->hws[ASPEED_CLK_MAC34] = hw; + + /* LPC Host (LHCLK) clock divider */ + hw = clk_hw_register_divider_table(dev, "lhclk", "hpll", 0, + scu_g6_base + ASPEED_G6_CLK_SELECTION1, 20, 3, 0, + ast2600_div_table, + 
&aspeed_g6_clk_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + aspeed_g6_clk_data->hws[ASPEED_CLK_LHCLK] = hw; + + /* gfx d1clk : use dp clk */ + regmap_update_bits(map, ASPEED_G6_CLK_SELECTION1, GENMASK(10, 8), BIT(10)); + /* SoC Display clock selection */ + hw = clk_hw_register_mux(dev, "d1clk", d1clk_parent_names, + ARRAY_SIZE(d1clk_parent_names), 0, + scu_g6_base + ASPEED_G6_CLK_SELECTION1, 8, 3, 0, + &aspeed_g6_clk_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + aspeed_g6_clk_data->hws[ASPEED_CLK_D1CLK] = hw; + + /* d1 clk div 0x308[17:15] x [14:12] - 8,7,6,5,4,3,2,1 */ + regmap_write(map, 0x308, 0x12000); /* 3x3 = 9 */ + + /* P-Bus (BCLK) clock divider */ + hw = clk_hw_register_divider_table(dev, "bclk", "hpll", 0, + scu_g6_base + ASPEED_G6_CLK_SELECTION1, 20, 3, 0, + ast2600_div_table, + &aspeed_g6_clk_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + aspeed_g6_clk_data->hws[ASPEED_CLK_BCLK] = hw; + + /* Video Capture clock selection */ + hw = clk_hw_register_mux(dev, "vclk", vclk_parent_names, + ARRAY_SIZE(vclk_parent_names), 0, + scu_g6_base + ASPEED_G6_CLK_SELECTION2, 12, 3, 0, + &aspeed_g6_clk_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + aspeed_g6_clk_data->hws[ASPEED_CLK_VCLK] = hw; + + /* Video Engine clock divider */ + hw = clk_hw_register_divider_table(dev, "eclk", NULL, 0, + scu_g6_base + ASPEED_G6_CLK_SELECTION1, 28, 3, 0, + ast2600_eclk_div_table, + &aspeed_g6_clk_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + aspeed_g6_clk_data->hws[ASPEED_CLK_ECLK] = hw; + + for (i = 0; i < ARRAY_SIZE(aspeed_g6_gates); i++) { + const struct aspeed_gate_data *gd = &aspeed_g6_gates[i]; + u32 gate_flags; + + /* + * Special case: the USB port 1 clock (bit 14) is always + * working the opposite way from the other ones. + */ + gate_flags = (gd->clock_idx == 14) ? 0 : CLK_GATE_SET_TO_DISABLE; + hw = aspeed_g6_clk_hw_register_gate(dev, + gd->name, + gd->parent_name, + gd->flags, + map, + gd->clock_idx, + gd->reset_idx, + gate_flags, + &aspeed_g6_clk_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + aspeed_g6_clk_data->hws[i] = hw; + } + + return 0; +}; + +static const struct of_device_id aspeed_g6_clk_dt_ids[] = { + { .compatible = "aspeed,ast2600-scu" }, + { } +}; + +static struct platform_driver aspeed_g6_clk_driver = { + .probe = aspeed_g6_clk_probe, + .driver = { + .name = "ast2600-clk", + .of_match_table = aspeed_g6_clk_dt_ids, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(aspeed_g6_clk_driver); + +static const u32 ast2600_a0_axi_ahb_div_table[] = { + 2, 2, 3, 5, +}; + +static const u32 ast2600_a1_axi_ahb_div_table[] = { + 4, 6, 2, 4, +}; + +static void __init aspeed_g6_cc(struct regmap *map) +{ + struct clk_hw *hw; + u32 val, div, chip_id, axi_div, ahb_div; + + clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, 25000000); + + /* + * High-speed PLL clock derived from the crystal. 
This is the CPU clock, + * and we assume that it is enabled + */ + regmap_read(map, ASPEED_HPLL_PARAM, &val); + aspeed_g6_clk_data->hws[ASPEED_CLK_HPLL] = ast2600_calc_pll("hpll", val); + + regmap_read(map, ASPEED_MPLL_PARAM, &val); + aspeed_g6_clk_data->hws[ASPEED_CLK_MPLL] = ast2600_calc_pll("mpll", val); + + regmap_read(map, ASPEED_DPLL_PARAM, &val); + aspeed_g6_clk_data->hws[ASPEED_CLK_DPLL] = ast2600_calc_pll("dpll", val); + + regmap_read(map, ASPEED_EPLL_PARAM, &val); + aspeed_g6_clk_data->hws[ASPEED_CLK_EPLL] = ast2600_calc_pll("epll", val); + + regmap_read(map, ASPEED_APLL_PARAM, &val); + aspeed_g6_clk_data->hws[ASPEED_CLK_APLL] = ast2600_calc_apll("apll", val); + + /* Strap bits 12:11 define the AXI/AHB clock frequency ratio (aka HCLK)*/ + regmap_read(map, ASPEED_G6_STRAP1, &val); + if (val & BIT(16)) + axi_div = 1; + else + axi_div = 2; + + regmap_read(map, ASPEED_G6_SILICON_REV, &chip_id); + if (chip_id & BIT(16)) + ahb_div = ast2600_a1_axi_ahb_div_table[(val >> 11) & 0x3]; + else + ahb_div = ast2600_a0_axi_ahb_div_table[(val >> 11) & 0x3]; + + hw = clk_hw_register_fixed_factor(NULL, "ahb", "hpll", 0, 1, axi_div * ahb_div); + aspeed_g6_clk_data->hws[ASPEED_CLK_AHB] = hw; + + regmap_read(map, ASPEED_G6_CLK_SELECTION1, &val); + val = (val >> 23) & 0x7; + div = 4 * (val + 1); + hw = clk_hw_register_fixed_factor(NULL, "apb1", "hpll", 0, 1, div); + aspeed_g6_clk_data->hws[ASPEED_CLK_APB1] = hw; + + regmap_read(map, ASPEED_G6_CLK_SELECTION4, &val); + val = (val >> 9) & 0x7; + div = 2 * (val + 1); + hw = clk_hw_register_fixed_factor(NULL, "apb2", "ahb", 0, 1, div); + aspeed_g6_clk_data->hws[ASPEED_CLK_APB2] = hw; + + /* USB 2.0 port1 phy 40MHz clock */ + hw = clk_hw_register_fixed_rate(NULL, "usb-phy-40m", NULL, 0, 40000000); + aspeed_g6_clk_data->hws[ASPEED_CLK_USBPHY_40M] = hw; +}; + +static void __init aspeed_g6_cc_init(struct device_node *np) +{ + struct regmap *map; + int ret; + int i; + + scu_g6_base = of_iomap(np, 0); + if (!scu_g6_base) + return; + + aspeed_g6_clk_data = kzalloc(struct_size(aspeed_g6_clk_data, hws, + ASPEED_G6_NUM_CLKS), GFP_KERNEL); + if (!aspeed_g6_clk_data) + return; + + /* + * This way all clocks fetched before the platform device probes, + * except those we assign here for early use, will be deferred. + */ + for (i = 0; i < ASPEED_G6_NUM_CLKS; i++) + aspeed_g6_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER); + + /* + * We check that the regmap works on this very first access, + * but as this is an MMIO-backed regmap, subsequent regmap + * access is not going to fail and we skip error checks from + * this point. 
+ */ + map = syscon_node_to_regmap(np); + if (IS_ERR(map)) { + pr_err("no syscon regmap\n"); + return; + } + + aspeed_g6_cc(map); + aspeed_g6_clk_data->num = ASPEED_G6_NUM_CLKS; + ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, aspeed_g6_clk_data); + if (ret) + pr_err("failed to add DT provider: %d\n", ret); +}; +CLK_OF_DECLARE_DRIVER(aspeed_cc_g6, "aspeed,ast2600-scu", aspeed_g6_cc_init); diff --git a/drivers/clk/clk-bulk.c b/drivers/clk/clk-bulk.c index 524bf9a5309857..e9e16425c739c4 100644 --- a/drivers/clk/clk-bulk.c +++ b/drivers/clk/clk-bulk.c @@ -18,10 +18,13 @@ static int __must_check of_clk_bulk_get(struct device_node *np, int num_clks, int ret; int i; - for (i = 0; i < num_clks; i++) + for (i = 0; i < num_clks; i++) { + clks[i].id = NULL; clks[i].clk = NULL; + } for (i = 0; i < num_clks; i++) { + of_property_read_string_index(np, "clock-names", i, &clks[i].id); clks[i].clk = of_clk_get(np, i); if (IS_ERR(clks[i].clk)) { ret = PTR_ERR(clks[i].clk); diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c index 23c9326ea48c5e..308b353815e170 100644 --- a/drivers/clk/clk-cdce925.c +++ b/drivers/clk/clk-cdce925.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -602,6 +603,30 @@ of_clk_cdce925_get(struct of_phandle_args *clkspec, void *_data) return &data->clk[idx].hw; } +static void cdce925_regulator_disable(void *regulator) +{ + regulator_disable(regulator); +} + +static int cdce925_regulator_enable(struct device *dev, const char *name) +{ + struct regulator *regulator; + int err; + + regulator = devm_regulator_get(dev, name); + if (IS_ERR(regulator)) + return PTR_ERR(regulator); + + err = regulator_enable(regulator); + if (err) { + dev_err(dev, "Failed to enable %s: %d\n", name, err); + return err; + } + + return devm_add_action_or_reset(dev, cdce925_regulator_disable, + regulator); +} + /* The CDCE925 uses a funky way to read/write registers. Bulk mode is * just weird, so just use the single byte mode exclusively. */ static struct regmap_bus regmap_cdce925_bus = { @@ -630,6 +655,15 @@ static int cdce925_probe(struct i2c_client *client, }; dev_dbg(&client->dev, "%s\n", __func__); + + err = cdce925_regulator_enable(&client->dev, "vdd"); + if (err) + return err; + + err = cdce925_regulator_enable(&client->dev, "vddout"); + if (err) + return err; + data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c index b06038b8f65860..4f13a681ddfcdd 100644 --- a/drivers/clk/clk-composite.c +++ b/drivers/clk/clk-composite.c @@ -3,7 +3,6 @@ * Copyright (c) 2013 NVIDIA CORPORATION. All rights reserved. 
*/ -#include #include #include #include diff --git a/drivers/clk/clk-lochnagar.c b/drivers/clk/clk-lochnagar.c index fa8c91758b1d7d..565bcd0cdde9e9 100644 --- a/drivers/clk/clk-lochnagar.c +++ b/drivers/clk/clk-lochnagar.c @@ -198,7 +198,7 @@ static u8 lochnagar_clk_get_parent(struct clk_hw *hw) if (ret < 0) { dev_dbg(priv->dev, "Failed to read parent of %s: %d\n", lclk->name, ret); - return hw->init->num_parents; + return clk_hw_get_num_parents(hw); } val &= lclk->src_mask; diff --git a/drivers/clk/clk-milbeaut.c b/drivers/clk/clk-milbeaut.c index 5fc78faf820c0f..80b9d78493bcd4 100644 --- a/drivers/clk/clk-milbeaut.c +++ b/drivers/clk/clk-milbeaut.c @@ -437,7 +437,7 @@ static int m10v_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, if (readl_poll_timeout(divider->write_valid_reg, val, !val, M10V_UPOLL_RATE, M10V_UTIMEOUT)) pr_err("%s:%s couldn't stabilize\n", - __func__, divider->hw.init->name); + __func__, clk_hw_get_name(hw)); } if (divider->lock) diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c index 07f3b252f3e0cb..bed140f7375f0f 100644 --- a/drivers/clk/clk-qoriq.c +++ b/drivers/clk/clk-qoriq.c @@ -686,7 +686,7 @@ static const struct clockgen_chipinfo chipinfo[] = { .guts_compat = "fsl,qoriq-device-config-1.0", .init_periph = p5020_init_periph, .cmux_groups = { - &p2041_cmux_grp1, &p2041_cmux_grp2 + &p5020_cmux_grp1, &p5020_cmux_grp2 }, .cmux_to_group = { 0, 1, -1 diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c index 72424eb7e5f877..6e780c2a9e6ba4 100644 --- a/drivers/clk/clk-si5341.c +++ b/drivers/clk/clk-si5341.c @@ -547,7 +547,6 @@ static int si5341_synth_clk_set_rate(struct clk_hw *hw, unsigned long rate, bool is_integer; n_num = synth->data->freq_vco; - n_den = rate; /* see if there's an integer solution */ r = do_div(n_num, rate); diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index ca99e9db657587..1c677d7f7f5307 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -37,6 +37,12 @@ static HLIST_HEAD(clk_root_list); static HLIST_HEAD(clk_orphan_list); static LIST_HEAD(clk_notifier_list); +static struct hlist_head *all_lists[] = { + &clk_root_list, + &clk_orphan_list, + NULL, +}; + /*** private data structures ***/ struct clk_parent_map { @@ -615,6 +621,8 @@ static void clk_core_get_boundaries(struct clk_core *core, { struct clk *clk_user; + lockdep_assert_held(&prepare_lock); + *min_rate = core->min_rate; *max_rate = core->max_rate; @@ -2460,7 +2468,7 @@ static int clk_core_set_parent_nolock(struct clk_core *core, if (core->parent == parent) return 0; - /* verify ops for for multi-parent clks */ + /* verify ops for multi-parent clks */ if (core->num_parents > 1 && !core->ops->set_parent) return -EPERM; @@ -2862,12 +2870,6 @@ static int inited = 0; static DEFINE_MUTEX(clk_debug_lock); static HLIST_HEAD(clk_debug_list); -static struct hlist_head *all_lists[] = { - &clk_root_list, - &clk_orphan_list, - NULL, -}; - static struct hlist_head *orphan_list[] = { &clk_orphan_list, NULL, @@ -2876,9 +2878,6 @@ static struct hlist_head *orphan_list[] = { static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, int level) { - if (!c) - return; - seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n", level * 3 + 1, "", 30 - level * 3, c->name, @@ -2893,9 +2892,6 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, { struct clk_core *child; - if (!c) - return; - clk_summary_show_one(s, c, level); hlist_for_each_entry(child, &c->children, child_node) @@ -2925,8 +2921,9 @@ 
DEFINE_SHOW_ATTRIBUTE(clk_summary); static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) { - if (!c) - return; + unsigned long min_rate, max_rate; + + clk_core_get_boundaries(c, &min_rate, &max_rate); /* This should be JSON format, i.e. elements separated with a comma */ seq_printf(s, "\"%s\": { ", c->name); @@ -2934,6 +2931,8 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); seq_printf(s, "\"protect_count\": %d,", c->protect_count); seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c)); + seq_printf(s, "\"min_rate\": %lu,", min_rate); + seq_printf(s, "\"max_rate\": %lu,", max_rate); seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c)); seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c)); seq_printf(s, "\"duty_cycle\": %u", @@ -2944,9 +2943,6 @@ static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level) { struct clk_core *child; - if (!c) - return; - clk_dump_one(s, c, level); hlist_for_each_entry(child, &c->children, child_node) { @@ -3042,15 +3038,15 @@ static void possible_parent_show(struct seq_file *s, struct clk_core *core, */ parent = clk_core_get_parent_by_index(core, i); if (parent) - seq_printf(s, "%s", parent->name); + seq_puts(s, parent->name); else if (core->parents[i].name) - seq_printf(s, "%s", core->parents[i].name); + seq_puts(s, core->parents[i].name); else if (core->parents[i].fw_name) seq_printf(s, "<%s>(fw)", core->parents[i].fw_name); else if (core->parents[i].index >= 0) - seq_printf(s, "%s", - of_clk_get_parent_name(core->of_node, - core->parents[i].index)); + seq_puts(s, + of_clk_get_parent_name(core->of_node, + core->parents[i].index)); else seq_puts(s, "(missing)"); @@ -3093,6 +3089,34 @@ static int clk_duty_cycle_show(struct seq_file *s, void *data) } DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle); +static int clk_min_rate_show(struct seq_file *s, void *data) +{ + struct clk_core *core = s->private; + unsigned long min_rate, max_rate; + + clk_prepare_lock(); + clk_core_get_boundaries(core, &min_rate, &max_rate); + clk_prepare_unlock(); + seq_printf(s, "%lu\n", min_rate); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(clk_min_rate); + +static int clk_max_rate_show(struct seq_file *s, void *data) +{ + struct clk_core *core = s->private; + unsigned long min_rate, max_rate; + + clk_prepare_lock(); + clk_core_get_boundaries(core, &min_rate, &max_rate); + clk_prepare_unlock(); + seq_printf(s, "%lu\n", max_rate); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(clk_max_rate); + static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) { struct dentry *root; @@ -3104,6 +3128,8 @@ static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) core->dentry = root; debugfs_create_ulong("clk_rate", 0444, root, &core->rate); + debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops); + debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops); debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy); debugfs_create_u32("clk_phase", 0444, root, &core->phase); debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops); @@ -3513,9 +3539,9 @@ static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist) return 0; } -static int clk_core_populate_parent_map(struct clk_core *core) +static int clk_core_populate_parent_map(struct clk_core *core, + const struct clk_init_data *init) { - const struct clk_init_data *init = core->hw->init; u8 num_parents = 
init->num_parents; const char * const *parent_names = init->parent_names; const struct clk_hw **parent_hws = init->parent_hws; @@ -3595,6 +3621,14 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw) { int ret; struct clk_core *core; + const struct clk_init_data *init = hw->init; + + /* + * The init data is not supposed to be used outside of registration path. + * Set it to NULL so that provider drivers can't use it either and so that + * we catch use of hw->init early on in the core. + */ + hw->init = NULL; core = kzalloc(sizeof(*core), GFP_KERNEL); if (!core) { @@ -3602,17 +3636,17 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw) goto fail_out; } - core->name = kstrdup_const(hw->init->name, GFP_KERNEL); + core->name = kstrdup_const(init->name, GFP_KERNEL); if (!core->name) { ret = -ENOMEM; goto fail_name; } - if (WARN_ON(!hw->init->ops)) { + if (WARN_ON(!init->ops)) { ret = -EINVAL; goto fail_ops; } - core->ops = hw->init->ops; + core->ops = init->ops; if (dev && pm_runtime_enabled(dev)) core->rpm_enabled = true; @@ -3621,13 +3655,13 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw) if (dev && dev->driver) core->owner = dev->driver->owner; core->hw = hw; - core->flags = hw->init->flags; - core->num_parents = hw->init->num_parents; + core->flags = init->flags; + core->num_parents = init->num_parents; core->min_rate = 0; core->max_rate = ULONG_MAX; hw->core = core; - ret = clk_core_populate_parent_map(core); + ret = clk_core_populate_parent_map(core, init); if (ret) goto fail_parents; @@ -3766,6 +3800,34 @@ static const struct clk_ops clk_nodrv_ops = { .set_parent = clk_nodrv_set_parent, }; +static void clk_core_evict_parent_cache_subtree(struct clk_core *root, + struct clk_core *target) +{ + int i; + struct clk_core *child; + + for (i = 0; i < root->num_parents; i++) + if (root->parents[i].core == target) + root->parents[i].core = NULL; + + hlist_for_each_entry(child, &root->children, child_node) + clk_core_evict_parent_cache_subtree(child, target); +} + +/* Remove this clk from all parent caches */ +static void clk_core_evict_parent_cache(struct clk_core *core) +{ + struct hlist_head **lists; + struct clk_core *root; + + lockdep_assert_held(&prepare_lock); + + for (lists = all_lists; *lists; lists++) + hlist_for_each_entry(root, *lists, child_node) + clk_core_evict_parent_cache_subtree(root, core); + +} + /** * clk_unregister - unregister a currently registered clock * @clk: clock to unregister @@ -3804,6 +3866,8 @@ void clk_unregister(struct clk *clk) clk_core_set_parent_nolock(child, NULL); } + clk_core_evict_parent_cache(clk->core); + hlist_del_init(&clk->core->child_node); if (clk->core->prepare_count) @@ -4345,12 +4409,43 @@ void devm_of_clk_del_provider(struct device *dev) } EXPORT_SYMBOL(devm_of_clk_del_provider); -/* - * Beware the return values when np is valid, but no clock provider is found. - * If name == NULL, the function returns -ENOENT. - * If name != NULL, the function returns -EINVAL. This is because - * of_parse_phandle_with_args() is called even if of_property_match_string() - * returns an error. +/** + * of_parse_clkspec() - Parse a DT clock specifier for a given device node + * @np: device node to parse clock specifier from + * @index: index of phandle to parse clock out of. If index < 0, @name is used + * @name: clock name to find and parse. 
If name is NULL, the index is used + * @out_args: Result of parsing the clock specifier + * + * Parses a device node's "clocks" and "clock-names" properties to find the + * phandle and cells for the index or name that is desired. The resulting clock + * specifier is placed into @out_args, or an errno is returned when there's a + * parsing error. The @index argument is ignored if @name is non-NULL. + * + * Example: + * + * phandle1: clock-controller@1 { + * #clock-cells = <2>; + * } + * + * phandle2: clock-controller@2 { + * #clock-cells = <1>; + * } + * + * clock-consumer@3 { + * clocks = <&phandle1 1 2 &phandle2 3>; + * clock-names = "name1", "name2"; + * } + * + * To get a device_node for `clock-controller@2' node you may call this + * function a few different ways: + * + * of_parse_clkspec(clock-consumer@3, -1, "name2", &args); + * of_parse_clkspec(clock-consumer@3, 1, NULL, &args); + * of_parse_clkspec(clock-consumer@3, 1, "name2", &args); + * + * Return: 0 upon successfully parsing the clock specifier. Otherwise, -ENOENT + * if @name is NULL or -EINVAL if @name is non-NULL and it can't be found in + * the "clock-names" property of @np. */ static int of_parse_clkspec(const struct device_node *np, int index, const char *name, struct of_phandle_args *out_args) diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c index 1c99e992d63882..1ac11b6a47a371 100644 --- a/drivers/clk/davinci/pll.c +++ b/drivers/clk/davinci/pll.c @@ -778,12 +778,15 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node, int i; clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); - if (!clk_data) + if (!clk_data) { + of_node_put(child); return -ENOMEM; + } clks = kmalloc_array(n_clks, sizeof(*clks), GFP_KERNEL); if (!clks) { kfree(clk_data); + of_node_put(child); return -ENOMEM; } diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c index 42e4667f22fd48..2022d9bead9152 100644 --- a/drivers/clk/imx/clk-imx7ulp.c +++ b/drivers/clk/imx/clk-imx7ulp.c @@ -42,6 +42,19 @@ static const struct clk_div_table ulp_div_table[] = { { .val = 7, .div = 64, }, }; +static const int pcc2_uart_clk_ids[] __initconst = { + IMX7ULP_CLK_LPUART4, + IMX7ULP_CLK_LPUART5, +}; + +static const int pcc3_uart_clk_ids[] __initconst = { + IMX7ULP_CLK_LPUART6, + IMX7ULP_CLK_LPUART7, +}; + +static struct clk **pcc2_uart_clks[ARRAY_SIZE(pcc2_uart_clk_ids) + 1] __initdata; +static struct clk **pcc3_uart_clks[ARRAY_SIZE(pcc3_uart_clk_ids) + 1] __initdata; + static void __init imx7ulp_clk_scg1_init(struct device_node *np) { struct clk_hw_onecell_data *clk_data; @@ -135,6 +148,7 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np) struct clk_hw_onecell_data *clk_data; struct clk_hw **clks; void __iomem *base; + int i; clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_PCC2_END), GFP_KERNEL); @@ -173,6 +187,14 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np) imx_check_clk_hws(clks, clk_data->num); of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data); + + for (i = 0; i < ARRAY_SIZE(pcc2_uart_clk_ids); i++) { + int index = pcc2_uart_clk_ids[i]; + + pcc2_uart_clks[i] = &clks[index]->clk; + } + + imx_register_uart_clocks(pcc2_uart_clks); } CLK_OF_DECLARE(imx7ulp_clk_pcc2, "fsl,imx7ulp-pcc2", imx7ulp_clk_pcc2_init); @@ -181,6 +203,7 @@ static void __init imx7ulp_clk_pcc3_init(struct device_node *np) struct clk_hw_onecell_data *clk_data; struct clk_hw **clks; void __iomem *base; + int i; clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_PCC3_END), 
GFP_KERNEL); @@ -218,6 +241,14 @@ static void __init imx7ulp_clk_pcc3_init(struct device_node *np) imx_check_clk_hws(clks, clk_data->num); of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data); + + for (i = 0; i < ARRAY_SIZE(pcc3_uart_clk_ids); i++) { + int index = pcc3_uart_clk_ids[i]; + + pcc3_uart_clks[i] = &clks[index]->clk; + } + + imx_register_uart_clocks(pcc3_uart_clks); } CLK_OF_DECLARE(imx7ulp_clk_pcc3, "fsl,imx7ulp-pcc3", imx7ulp_clk_pcc3_init); diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c index 43fa9c361fcb90..067ab876911dd4 100644 --- a/drivers/clk/imx/clk-imx8mm.c +++ b/drivers/clk/imx/clk-imx8mm.c @@ -22,7 +22,7 @@ static u32 share_count_sai3; static u32 share_count_sai4; static u32 share_count_sai5; static u32 share_count_sai6; -static u32 share_count_dcss; +static u32 share_count_disp; static u32 share_count_pdm; static u32 share_count_nand; @@ -38,8 +38,8 @@ static const struct imx_pll14xx_rate_table imx8mm_pll1416x_tbl[] = { }; static const struct imx_pll14xx_rate_table imx8mm_audiopll_tbl[] = { - PLL_1443X_RATE(786432000U, 655, 5, 2, 23593), - PLL_1443X_RATE(722534400U, 301, 5, 1, 3670), + PLL_1443X_RATE(393216000U, 262, 2, 3, 9437), + PLL_1443X_RATE(361267200U, 361, 3, 3, 17511), }; static const struct imx_pll14xx_rate_table imx8mm_videopll_tbl[] = { @@ -51,43 +51,43 @@ static const struct imx_pll14xx_rate_table imx8mm_drampll_tbl[] = { PLL_1443X_RATE(650000000U, 325, 3, 2, 0), }; -static struct imx_pll14xx_clk imx8mm_audio_pll __initdata = { +static struct imx_pll14xx_clk imx8mm_audio_pll = { .type = PLL_1443X, .rate_table = imx8mm_audiopll_tbl, .rate_count = ARRAY_SIZE(imx8mm_audiopll_tbl), }; -static struct imx_pll14xx_clk imx8mm_video_pll __initdata = { +static struct imx_pll14xx_clk imx8mm_video_pll = { .type = PLL_1443X, .rate_table = imx8mm_videopll_tbl, .rate_count = ARRAY_SIZE(imx8mm_videopll_tbl), }; -static struct imx_pll14xx_clk imx8mm_dram_pll __initdata = { +static struct imx_pll14xx_clk imx8mm_dram_pll = { .type = PLL_1443X, .rate_table = imx8mm_drampll_tbl, .rate_count = ARRAY_SIZE(imx8mm_drampll_tbl), }; -static struct imx_pll14xx_clk imx8mm_arm_pll __initdata = { +static struct imx_pll14xx_clk imx8mm_arm_pll = { .type = PLL_1416X, .rate_table = imx8mm_pll1416x_tbl, .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl), }; -static struct imx_pll14xx_clk imx8mm_gpu_pll __initdata = { +static struct imx_pll14xx_clk imx8mm_gpu_pll = { .type = PLL_1416X, .rate_table = imx8mm_pll1416x_tbl, .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl), }; -static struct imx_pll14xx_clk imx8mm_vpu_pll __initdata = { +static struct imx_pll14xx_clk imx8mm_vpu_pll = { .type = PLL_1416X, .rate_table = imx8mm_pll1416x_tbl, .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl), }; -static struct imx_pll14xx_clk imx8mm_sys_pll __initdata = { +static struct imx_pll14xx_clk imx8mm_sys_pll = { .type = PLL_1416X, .rate_table = imx8mm_pll1416x_tbl, .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl), @@ -175,10 +175,10 @@ static const char *imx8mm_vpu_g1_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_8 static const char *imx8mm_vpu_g2_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m", "sys_pll2_1000m", "sys_pll1_100m", "sys_pll2_125m", "sys_pll3_out", "audio_pll1_out", }; -static const char *imx8mm_disp_dtrc_sels[] = {"osc_24m", "video_pll2_out", "sys_pll1_800m", "sys_pll2_1000m", +static const char *imx8mm_disp_dtrc_sels[] = {"osc_24m", "dummy", "sys_pll1_800m", "sys_pll2_1000m", "sys_pll1_160m", "video_pll1_out", "sys_pll3_out", "audio_pll2_out", }; -static const char 
*imx8mm_disp_dc8000_sels[] = {"osc_24m", "video_pll2_out", "sys_pll1_800m", "sys_pll2_1000m", +static const char *imx8mm_disp_dc8000_sels[] = {"osc_24m", "dummy", "sys_pll1_800m", "sys_pll2_1000m", "sys_pll1_160m", "video_pll1_out", "sys_pll3_out", "audio_pll2_out", }; static const char *imx8mm_pcie1_ctrl_sels[] = {"osc_24m", "sys_pll2_250m", "sys_pll2_200m", "sys_pll1_266m", @@ -232,7 +232,7 @@ static const char *imx8mm_enet_phy_sels[] = {"osc_24m", "sys_pll2_50m", "sys_pll static const char *imx8mm_nand_sels[] = {"osc_24m", "sys_pll2_500m", "audio_pll1_out", "sys_pll1_400m", "audio_pll2_out", "sys_pll3_out", "sys_pll2_250m", "video_pll1_out", }; -static const char *imx8mm_qspi_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m", "sys_pll2_500m", +static const char *imx8mm_qspi_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll2_333m", "sys_pll2_500m", "audio_pll2_out", "sys_pll1_266m", "sys_pll3_out", "sys_pll1_100m", }; static const char *imx8mm_usdhc1_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m", "sys_pll2_500m", @@ -287,13 +287,13 @@ static const char *imx8mm_pwm2_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_1 "sys_pll3_out", "clk_ext1", "sys_pll1_80m", "video_pll1_out", }; static const char *imx8mm_pwm3_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m", "sys_pll1_40m", - "sys3_pll2_out", "clk_ext2", "sys_pll1_80m", "video_pll1_out", }; + "sys_pll3_out", "clk_ext2", "sys_pll1_80m", "video_pll1_out", }; static const char *imx8mm_pwm4_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m", "sys_pll1_40m", "sys_pll3_out", "clk_ext2", "sys_pll1_80m", "video_pll1_out", }; static const char *imx8mm_gpt1_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m", "sys_pll1_40m", - "video_pll1_out", "sys_pll1_800m", "audio_pll1_out", "clk_ext1" }; + "video_pll1_out", "sys_pll1_80m", "audio_pll1_out", "clk_ext1" }; static const char *imx8mm_wdog_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_160m", "vpu_pll_out", "sys_pll2_125m", "sys_pll3_out", "sys_pll1_80m", "sys_pll2_166m", }; @@ -347,7 +347,7 @@ static const char *imx8mm_pdm_sels[] = {"osc_24m", "sys_pll2_100m", "audio_pll1_ "sys_pll2_1000m", "sys_pll3_out", "clk_ext3", "audio_pll2_out", }; static const char *imx8mm_vpu_h1_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m", "sys_pll2_1000m", - "audio_pll2_out", "sys_pll2_125m", "sys_pll3_clk", "audio_pll1_out", }; + "audio_pll2_out", "sys_pll2_125m", "sys_pll3_out", "audio_pll1_out", }; static const char *imx8mm_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", }; @@ -357,7 +357,7 @@ static const char *imx8mm_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "osc_27m", static struct clk *clks[IMX8MM_CLK_END]; static struct clk_onecell_data clk_data; -static struct clk ** const uart_clks[] __initconst = { +static struct clk ** const uart_clks[] = { &clks[IMX8MM_CLK_UART1_ROOT], &clks[IMX8MM_CLK_UART2_ROOT], &clks[IMX8MM_CLK_UART3_ROOT], @@ -365,19 +365,20 @@ static struct clk ** const uart_clks[] __initconst = { NULL }; -static int __init imx8mm_clocks_init(struct device_node *ccm_node) +static int imx8mm_clocks_probe(struct platform_device *pdev) { - struct device_node *np; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; void __iomem *base; int ret; clks[IMX8MM_CLK_DUMMY] = imx_clk_fixed("dummy", 0); - clks[IMX8MM_CLK_24M] = of_clk_get_by_name(ccm_node, "osc_24m"); - clks[IMX8MM_CLK_32K] = of_clk_get_by_name(ccm_node, "osc_32k"); - clks[IMX8MM_CLK_EXT1] = of_clk_get_by_name(ccm_node, "clk_ext1"); - clks[IMX8MM_CLK_EXT2] = of_clk_get_by_name(ccm_node, 
"clk_ext2"); - clks[IMX8MM_CLK_EXT3] = of_clk_get_by_name(ccm_node, "clk_ext3"); - clks[IMX8MM_CLK_EXT4] = of_clk_get_by_name(ccm_node, "clk_ext4"); + clks[IMX8MM_CLK_24M] = of_clk_get_by_name(np, "osc_24m"); + clks[IMX8MM_CLK_32K] = of_clk_get_by_name(np, "osc_32k"); + clks[IMX8MM_CLK_EXT1] = of_clk_get_by_name(np, "clk_ext1"); + clks[IMX8MM_CLK_EXT2] = of_clk_get_by_name(np, "clk_ext2"); + clks[IMX8MM_CLK_EXT3] = of_clk_get_by_name(np, "clk_ext3"); + clks[IMX8MM_CLK_EXT4] = of_clk_get_by_name(np, "clk_ext4"); np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop"); base = of_iomap(np, 0); @@ -407,28 +408,16 @@ static int __init imx8mm_clocks_init(struct device_node *ccm_node) clks[IMX8MM_SYS_PLL3] = imx_clk_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx8mm_sys_pll); /* PLL bypass out */ - clks[IMX8MM_AUDIO_PLL1_BYPASS] = imx_clk_mux_flags("audio_pll1_bypass", base, 4, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX8MM_AUDIO_PLL2_BYPASS] = imx_clk_mux_flags("audio_pll2_bypass", base + 0x14, 4, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX8MM_VIDEO_PLL1_BYPASS] = imx_clk_mux_flags("video_pll1_bypass", base + 0x28, 4, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX8MM_DRAM_PLL_BYPASS] = imx_clk_mux_flags("dram_pll_bypass", base + 0x50, 4, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX8MM_GPU_PLL_BYPASS] = imx_clk_mux_flags("gpu_pll_bypass", base + 0x64, 4, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX8MM_VPU_PLL_BYPASS] = imx_clk_mux_flags("vpu_pll_bypass", base + 0x74, 4, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX8MM_ARM_PLL_BYPASS] = imx_clk_mux_flags("arm_pll_bypass", base + 0x84, 4, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX8MM_SYS_PLL1_BYPASS] = imx_clk_mux_flags("sys_pll1_bypass", base + 0x94, 4, 1, sys_pll1_bypass_sels, ARRAY_SIZE(sys_pll1_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX8MM_SYS_PLL2_BYPASS] = imx_clk_mux_flags("sys_pll2_bypass", base + 0x104, 4, 1, sys_pll2_bypass_sels, ARRAY_SIZE(sys_pll2_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX8MM_SYS_PLL3_BYPASS] = imx_clk_mux_flags("sys_pll3_bypass", base + 0x114, 4, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT); - - /* unbypass all the plls */ - clk_set_parent(clks[IMX8MM_AUDIO_PLL1_BYPASS], clks[IMX8MM_AUDIO_PLL1]); - clk_set_parent(clks[IMX8MM_AUDIO_PLL2_BYPASS], clks[IMX8MM_AUDIO_PLL2]); - clk_set_parent(clks[IMX8MM_VIDEO_PLL1_BYPASS], clks[IMX8MM_VIDEO_PLL1]); - clk_set_parent(clks[IMX8MM_DRAM_PLL_BYPASS], clks[IMX8MM_DRAM_PLL]); - clk_set_parent(clks[IMX8MM_GPU_PLL_BYPASS], clks[IMX8MM_GPU_PLL]); - clk_set_parent(clks[IMX8MM_VPU_PLL_BYPASS], clks[IMX8MM_VPU_PLL]); - clk_set_parent(clks[IMX8MM_ARM_PLL_BYPASS], clks[IMX8MM_ARM_PLL]); - clk_set_parent(clks[IMX8MM_SYS_PLL1_BYPASS], clks[IMX8MM_SYS_PLL1]); - clk_set_parent(clks[IMX8MM_SYS_PLL2_BYPASS], clks[IMX8MM_SYS_PLL2]); - clk_set_parent(clks[IMX8MM_SYS_PLL3_BYPASS], clks[IMX8MM_SYS_PLL3]); + clks[IMX8MM_AUDIO_PLL1_BYPASS] = imx_clk_mux_flags("audio_pll1_bypass", base, 16, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT); + clks[IMX8MM_AUDIO_PLL2_BYPASS] = imx_clk_mux_flags("audio_pll2_bypass", base + 0x14, 16, 1, audio_pll2_bypass_sels, 
ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT); + clks[IMX8MM_VIDEO_PLL1_BYPASS] = imx_clk_mux_flags("video_pll1_bypass", base + 0x28, 16, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT); + clks[IMX8MM_DRAM_PLL_BYPASS] = imx_clk_mux_flags("dram_pll_bypass", base + 0x50, 16, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT); + clks[IMX8MM_GPU_PLL_BYPASS] = imx_clk_mux_flags("gpu_pll_bypass", base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT); + clks[IMX8MM_VPU_PLL_BYPASS] = imx_clk_mux_flags("vpu_pll_bypass", base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT); + clks[IMX8MM_ARM_PLL_BYPASS] = imx_clk_mux_flags("arm_pll_bypass", base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT); + clks[IMX8MM_SYS_PLL1_BYPASS] = imx_clk_mux_flags("sys_pll1_bypass", base + 0x94, 28, 1, sys_pll1_bypass_sels, ARRAY_SIZE(sys_pll1_bypass_sels), CLK_SET_RATE_PARENT); + clks[IMX8MM_SYS_PLL2_BYPASS] = imx_clk_mux_flags("sys_pll2_bypass", base + 0x104, 28, 1, sys_pll2_bypass_sels, ARRAY_SIZE(sys_pll2_bypass_sels), CLK_SET_RATE_PARENT); + clks[IMX8MM_SYS_PLL3_BYPASS] = imx_clk_mux_flags("sys_pll3_bypass", base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT); /* PLL out gate */ clks[IMX8MM_AUDIO_PLL1_OUT] = imx_clk_gate("audio_pll1_out", "audio_pll1_bypass", base, 13); @@ -463,10 +452,10 @@ static int __init imx8mm_clocks_init(struct device_node *ccm_node) clks[IMX8MM_SYS_PLL2_500M] = imx_clk_fixed_factor("sys_pll2_500m", "sys_pll2_out", 1, 2); clks[IMX8MM_SYS_PLL2_1000M] = imx_clk_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1); - np = ccm_node; - base = of_iomap(np, 0); - if (WARN_ON(!base)) - return -ENOMEM; + np = dev->of_node; + base = devm_platform_ioremap_resource(pdev, 0); + if (WARN_ON(IS_ERR(base))) + return PTR_ERR(base); /* Core Slice */ clks[IMX8MM_CLK_A53_SRC] = imx_clk_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mm_a53_sels, ARRAY_SIZE(imx8mm_a53_sels)); @@ -614,7 +603,7 @@ static int __init imx8mm_clocks_init(struct device_node *ccm_node) clks[IMX8MM_CLK_UART2_ROOT] = imx_clk_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0); clks[IMX8MM_CLK_UART3_ROOT] = imx_clk_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0); clks[IMX8MM_CLK_UART4_ROOT] = imx_clk_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0); - clks[IMX8MM_CLK_USB1_CTRL_ROOT] = imx_clk_gate4("usb1_ctrl_root_clk", "usb_core_ref", base + 0x44d0, 0); + clks[IMX8MM_CLK_USB1_CTRL_ROOT] = imx_clk_gate4("usb1_ctrl_root_clk", "usb_bus", base + 0x44d0, 0); clks[IMX8MM_CLK_GPU3D_ROOT] = imx_clk_gate4("gpu3d_root_clk", "gpu3d_div", base + 0x44f0, 0); clks[IMX8MM_CLK_USDHC1_ROOT] = imx_clk_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0); clks[IMX8MM_CLK_USDHC2_ROOT] = imx_clk_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0); @@ -627,10 +616,10 @@ static int __init imx8mm_clocks_init(struct device_node *ccm_node) clks[IMX8MM_CLK_VPU_G2_ROOT] = imx_clk_gate4("vpu_g2_root_clk", "vpu_g2", base + 0x45a0, 0); clks[IMX8MM_CLK_PDM_ROOT] = imx_clk_gate2_shared2("pdm_root_clk", "pdm", base + 0x45b0, 0, &share_count_pdm); clks[IMX8MM_CLK_PDM_IPG] = imx_clk_gate2_shared2("pdm_ipg_clk", "ipg_audio_root", base + 0x45b0, 0, &share_count_pdm); - clks[IMX8MM_CLK_DISP_ROOT] = imx_clk_gate2_shared2("disp_root_clk", "disp_dc8000", base + 0x45d0, 0, &share_count_dcss); - clks[IMX8MM_CLK_DISP_AXI_ROOT] = 
imx_clk_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_dcss); - clks[IMX8MM_CLK_DISP_APB_ROOT] = imx_clk_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_dcss); - clks[IMX8MM_CLK_DISP_RTRM_ROOT] = imx_clk_gate2_shared2("disp_rtrm_root_clk", "disp_rtrm", base + 0x45d0, 0, &share_count_dcss); + clks[IMX8MM_CLK_DISP_ROOT] = imx_clk_gate2_shared2("disp_root_clk", "disp_dc8000", base + 0x45d0, 0, &share_count_disp); + clks[IMX8MM_CLK_DISP_AXI_ROOT] = imx_clk_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_disp); + clks[IMX8MM_CLK_DISP_APB_ROOT] = imx_clk_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_disp); + clks[IMX8MM_CLK_DISP_RTRM_ROOT] = imx_clk_gate2_shared2("disp_rtrm_root_clk", "disp_rtrm", base + 0x45d0, 0, &share_count_disp); clks[IMX8MM_CLK_USDHC3_ROOT] = imx_clk_gate4("usdhc3_root_clk", "usdhc3", base + 0x45e0, 0); clks[IMX8MM_CLK_TMU_ROOT] = imx_clk_gate4("tmu_root_clk", "ipg_root", base + 0x4620, 0); clks[IMX8MM_CLK_VPU_DEC_ROOT] = imx_clk_gate4("vpu_dec_root_clk", "vpu_bus", base + 0x4630, 0); @@ -658,11 +647,30 @@ static int __init imx8mm_clocks_init(struct device_node *ccm_node) ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); if (ret < 0) { pr_err("failed to register clks for i.MX8MM\n"); - return -EINVAL; + goto unregister_clks; } imx_register_uart_clocks(uart_clks); return 0; + +unregister_clks: + imx_unregister_clocks(clks, ARRAY_SIZE(clks)); + + return ret; } -CLK_OF_DECLARE_DRIVER(imx8mm, "fsl,imx8mm-ccm", imx8mm_clocks_init); + +static const struct of_device_id imx8mm_clk_of_match[] = { + { .compatible = "fsl,imx8mm-ccm" }, + { /* Sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, imx8mm_clk_of_match); + +static struct platform_driver imx8mm_clk_driver = { + .probe = imx8mm_clocks_probe, + .driver = { + .name = "imx8mm-ccm", + .of_match_table = of_match_ptr(imx8mm_clk_of_match), + }, +}; +module_platform_driver(imx8mm_clk_driver); diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c index 07481a53e33611..47a4b44ba3cb00 100644 --- a/drivers/clk/imx/clk-imx8mn.c +++ b/drivers/clk/imx/clk-imx8mn.c @@ -42,6 +42,8 @@ enum { static const struct imx_pll14xx_rate_table imx8mn_pll1416x_tbl[] = { PLL_1416X_RATE(1800000000U, 225, 3, 0), PLL_1416X_RATE(1600000000U, 200, 3, 0), + PLL_1416X_RATE(1500000000U, 375, 3, 1), + PLL_1416X_RATE(1400000000U, 350, 3, 1), PLL_1416X_RATE(1200000000U, 300, 3, 1), PLL_1416X_RATE(1000000000U, 250, 3, 1), PLL_1416X_RATE(800000000U, 200, 3, 1), @@ -51,8 +53,8 @@ static const struct imx_pll14xx_rate_table imx8mn_pll1416x_tbl[] = { }; static const struct imx_pll14xx_rate_table imx8mn_audiopll_tbl[] = { - PLL_1443X_RATE(786432000U, 655, 5, 2, 23593), - PLL_1443X_RATE(722534400U, 301, 5, 1, 3670), + PLL_1443X_RATE(393216000U, 262, 2, 3, 9437), + PLL_1443X_RATE(361267200U, 361, 3, 3, 17511), }; static const struct imx_pll14xx_rate_table imx8mn_videopll_tbl[] = { @@ -67,36 +69,43 @@ static const struct imx_pll14xx_rate_table imx8mn_drampll_tbl[] = { static struct imx_pll14xx_clk imx8mn_audio_pll = { .type = PLL_1443X, .rate_table = imx8mn_audiopll_tbl, + .rate_count = ARRAY_SIZE(imx8mn_audiopll_tbl), }; static struct imx_pll14xx_clk imx8mn_video_pll = { .type = PLL_1443X, .rate_table = imx8mn_videopll_tbl, + .rate_count = ARRAY_SIZE(imx8mn_videopll_tbl), }; static struct imx_pll14xx_clk imx8mn_dram_pll = { .type = PLL_1443X, .rate_table = imx8mn_drampll_tbl, + .rate_count = ARRAY_SIZE(imx8mn_drampll_tbl), }; 
static struct imx_pll14xx_clk imx8mn_arm_pll = { .type = PLL_1416X, .rate_table = imx8mn_pll1416x_tbl, + .rate_count = ARRAY_SIZE(imx8mn_pll1416x_tbl), }; static struct imx_pll14xx_clk imx8mn_gpu_pll = { .type = PLL_1416X, .rate_table = imx8mn_pll1416x_tbl, + .rate_count = ARRAY_SIZE(imx8mn_pll1416x_tbl), }; static struct imx_pll14xx_clk imx8mn_vpu_pll = { .type = PLL_1416X, .rate_table = imx8mn_pll1416x_tbl, + .rate_count = ARRAY_SIZE(imx8mn_pll1416x_tbl), }; static struct imx_pll14xx_clk imx8mn_sys_pll = { .type = PLL_1416X, .rate_table = imx8mn_pll1416x_tbl, + .rate_count = ARRAY_SIZE(imx8mn_pll1416x_tbl), }; static const char * const pll_ref_sels[] = { "osc_24m", "dummy", "dummy", "dummy", }; @@ -140,7 +149,7 @@ static const char * const imx8mn_disp_axi_sels[] = {"osc_24m", "sys_pll2_1000m", "clk_ext1", "clk_ext4", }; static const char * const imx8mn_disp_apb_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll1_800m", - "sys_pll3_out", "sys1_pll_40m", "audio_pll2_out", + "sys_pll3_out", "sys_pll1_40m", "audio_pll2_out", "clk_ext1", "clk_ext3", }; static const char * const imx8mn_usb_bus_sels[] = {"osc_24m", "sys_pll2_500m", "sys_pll1_800m", @@ -219,9 +228,9 @@ static const char * const imx8mn_nand_sels[] = {"osc_24m", "sys_pll2_500m", "aud "sys_pll1_400m", "audio_pll2_out", "sys_pll3_out", "sys_pll2_250m", "video_pll1_out", }; -static const char * const imx8mn_qspi_sels[] = {"osc_24m", "sys1_pll_400m", "sys_pll1_800m", - "sys2_pll_500m", "audio_pll2_out", "sys1_pll_266m", - "sys3_pll2_out", "sys1_pll_100m", }; +static const char * const imx8mn_qspi_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll2_333m", + "sys_pll2_500m", "audio_pll2_out", "sys_pll1_266m", + "sys_pll3_out", "sys_pll1_100m", }; static const char * const imx8mn_usdhc1_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m", "sys_pll2_500m", "sys_pll3_out", "sys_pll1_266m", @@ -271,6 +280,10 @@ static const char * const imx8mn_usb_phy_sels[] = {"osc_24m", "sys_pll1_100m", " "sys_pll2_100m", "sys_pll2_200m", "clk_ext2", "clk_ext3", "audio_pll2_out", }; +static const char * const imx8mn_gic_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m", + "sys_pll2_100m", "sys_pll1_800m", "clk_ext2", + "clk_ext4", "audio_pll2_out" }; + static const char * const imx8mn_ecspi1_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m", "sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out", "sys_pll2_250m", "audio_pll2_out", }; @@ -288,7 +301,7 @@ static const char * const imx8mn_pwm2_sels[] = {"osc_24m", "sys_pll2_100m", "sys "sys_pll1_80m", "video_pll1_out", }; static const char * const imx8mn_pwm3_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m", - "sys_pll1_40m", "sys3_pll2_out", "clk_ext2", + "sys_pll1_40m", "sys_pll3_out", "clk_ext2", "sys_pll1_80m", "video_pll1_out", }; static const char * const imx8mn_pwm4_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m", @@ -317,7 +330,7 @@ static const char * const imx8mn_dsi_dbi_sels[] = {"osc_24m", "sys_pll1_266m", " static const char * const imx8mn_usdhc3_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m", "sys_pll2_500m", "sys_pll3_out", "sys_pll1_266m", - "audio_pll2_clk", "sys_pll1_100m", }; + "audio_pll2_out", "sys_pll1_100m", }; static const char * const imx8mn_camera_pixel_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m", "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out", @@ -346,7 +359,7 @@ static const char * const imx8mn_pdm_sels[] = {"osc_24m", "sys_pll2_100m", "audi static const char * const imx8mn_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", }; static const char * const 
imx8mn_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "osc_27m", - "sys_pll1_200m", "audio_pll2_clk", "vpu_pll", + "sys_pll1_200m", "audio_pll2_out", "vpu_pll", "sys_pll1_80m", }; static const char * const imx8mn_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_400m", "sys_pll2_166m", "sys_pll3_out", "audio_pll1_out", @@ -355,6 +368,14 @@ static const char * const imx8mn_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sy static struct clk *clks[IMX8MN_CLK_END]; static struct clk_onecell_data clk_data; +static struct clk ** const uart_clks[] = { + &clks[IMX8MN_CLK_UART1_ROOT], + &clks[IMX8MN_CLK_UART2_ROOT], + &clks[IMX8MN_CLK_UART3_ROOT], + &clks[IMX8MN_CLK_UART4_ROOT], + NULL +}; + static int imx8mn_clocks_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -400,40 +421,28 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) clks[IMX8MN_SYS_PLL3] = imx_clk_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx8mn_sys_pll); /* PLL bypass out */ - clks[IMX8MN_AUDIO_PLL1_BYPASS] = imx_clk_mux_flags("audio_pll1_bypass", base, 4, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX8MN_AUDIO_PLL2_BYPASS] = imx_clk_mux_flags("audio_pll2_bypass", base + 0x14, 4, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX8MN_VIDEO_PLL1_BYPASS] = imx_clk_mux_flags("video_pll1_bypass", base + 0x28, 4, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX8MN_DRAM_PLL_BYPASS] = imx_clk_mux_flags("dram_pll_bypass", base + 0x50, 4, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX8MN_GPU_PLL_BYPASS] = imx_clk_mux_flags("gpu_pll_bypass", base + 0x64, 4, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX8MN_VPU_PLL_BYPASS] = imx_clk_mux_flags("vpu_pll_bypass", base + 0x74, 4, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX8MN_ARM_PLL_BYPASS] = imx_clk_mux_flags("arm_pll_bypass", base + 0x84, 4, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX8MN_SYS_PLL1_BYPASS] = imx_clk_mux_flags("sys_pll1_bypass", base + 0x94, 4, 1, sys_pll1_bypass_sels, ARRAY_SIZE(sys_pll1_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX8MN_SYS_PLL2_BYPASS] = imx_clk_mux_flags("sys_pll2_bypass", base + 0x104, 4, 1, sys_pll2_bypass_sels, ARRAY_SIZE(sys_pll2_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX8MN_SYS_PLL3_BYPASS] = imx_clk_mux_flags("sys_pll3_bypass", base + 0x114, 4, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT); - - /* unbypass all the plls */ - clk_set_parent(clks[IMX8MN_AUDIO_PLL1_BYPASS], clks[IMX8MN_AUDIO_PLL1]); - clk_set_parent(clks[IMX8MN_AUDIO_PLL2_BYPASS], clks[IMX8MN_AUDIO_PLL2]); - clk_set_parent(clks[IMX8MN_VIDEO_PLL1_BYPASS], clks[IMX8MN_VIDEO_PLL1]); - clk_set_parent(clks[IMX8MN_DRAM_PLL_BYPASS], clks[IMX8MN_DRAM_PLL]); - clk_set_parent(clks[IMX8MN_GPU_PLL_BYPASS], clks[IMX8MN_GPU_PLL]); - clk_set_parent(clks[IMX8MN_VPU_PLL_BYPASS], clks[IMX8MN_VPU_PLL]); - clk_set_parent(clks[IMX8MN_ARM_PLL_BYPASS], clks[IMX8MN_ARM_PLL]); - clk_set_parent(clks[IMX8MN_SYS_PLL1_BYPASS], clks[IMX8MN_SYS_PLL1]); - clk_set_parent(clks[IMX8MN_SYS_PLL2_BYPASS], clks[IMX8MN_SYS_PLL2]); - clk_set_parent(clks[IMX8MN_SYS_PLL3_BYPASS], clks[IMX8MN_SYS_PLL3]); + clks[IMX8MN_AUDIO_PLL1_BYPASS] = imx_clk_mux_flags("audio_pll1_bypass", base, 16, 1, audio_pll1_bypass_sels, 
ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT); + clks[IMX8MN_AUDIO_PLL2_BYPASS] = imx_clk_mux_flags("audio_pll2_bypass", base + 0x14, 16, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT); + clks[IMX8MN_VIDEO_PLL1_BYPASS] = imx_clk_mux_flags("video_pll1_bypass", base + 0x28, 16, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT); + clks[IMX8MN_DRAM_PLL_BYPASS] = imx_clk_mux_flags("dram_pll_bypass", base + 0x50, 16, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT); + clks[IMX8MN_GPU_PLL_BYPASS] = imx_clk_mux_flags("gpu_pll_bypass", base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT); + clks[IMX8MN_VPU_PLL_BYPASS] = imx_clk_mux_flags("vpu_pll_bypass", base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT); + clks[IMX8MN_ARM_PLL_BYPASS] = imx_clk_mux_flags("arm_pll_bypass", base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT); + clks[IMX8MN_SYS_PLL1_BYPASS] = imx_clk_mux_flags("sys_pll1_bypass", base + 0x94, 28, 1, sys_pll1_bypass_sels, ARRAY_SIZE(sys_pll1_bypass_sels), CLK_SET_RATE_PARENT); + clks[IMX8MN_SYS_PLL2_BYPASS] = imx_clk_mux_flags("sys_pll2_bypass", base + 0x104, 28, 1, sys_pll2_bypass_sels, ARRAY_SIZE(sys_pll2_bypass_sels), CLK_SET_RATE_PARENT); + clks[IMX8MN_SYS_PLL3_BYPASS] = imx_clk_mux_flags("sys_pll3_bypass", base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT); /* PLL out gate */ clks[IMX8MN_AUDIO_PLL1_OUT] = imx_clk_gate("audio_pll1_out", "audio_pll1_bypass", base, 13); clks[IMX8MN_AUDIO_PLL2_OUT] = imx_clk_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x14, 13); clks[IMX8MN_VIDEO_PLL1_OUT] = imx_clk_gate("video_pll1_out", "video_pll1_bypass", base + 0x28, 13); clks[IMX8MN_DRAM_PLL_OUT] = imx_clk_gate("dram_pll_out", "dram_pll_bypass", base + 0x50, 13); - clks[IMX8MN_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 13); - clks[IMX8MN_VPU_PLL_OUT] = imx_clk_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 13); - clks[IMX8MN_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 13); - clks[IMX8MN_SYS_PLL1_OUT] = imx_clk_gate("sys_pll1_out", "sys_pll1_bypass", base + 0x94, 13); - clks[IMX8MN_SYS_PLL2_OUT] = imx_clk_gate("sys_pll2_out", "sys_pll2_bypass", base + 0x104, 13); - clks[IMX8MN_SYS_PLL3_OUT] = imx_clk_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 13); + clks[IMX8MN_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 11); + clks[IMX8MN_VPU_PLL_OUT] = imx_clk_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 11); + clks[IMX8MN_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 11); + clks[IMX8MN_SYS_PLL1_OUT] = imx_clk_gate("sys_pll1_out", "sys_pll1_bypass", base + 0x94, 11); + clks[IMX8MN_SYS_PLL2_OUT] = imx_clk_gate("sys_pll2_out", "sys_pll2_bypass", base + 0x104, 11); + clks[IMX8MN_SYS_PLL3_OUT] = imx_clk_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 11); /* SYS PLL fixed output */ clks[IMX8MN_SYS_PLL1_40M] = imx_clk_fixed_factor("sys_pll1_40m", "sys_pll1_out", 1, 20); @@ -516,6 +525,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) clks[IMX8MN_CLK_UART4] = imx8m_clk_composite("uart4", imx8mn_uart4_sels, base + 0xb080); clks[IMX8MN_CLK_USB_CORE_REF] = imx8m_clk_composite("usb_core_ref", imx8mn_usb_core_sels, base + 0xb100); clks[IMX8MN_CLK_USB_PHY_REF] = 
imx8m_clk_composite("usb_phy_ref", imx8mn_usb_phy_sels, base + 0xb180); + clks[IMX8MN_CLK_GIC] = imx8m_clk_composite_critical("gic", imx8mn_gic_sels, base + 0xb200); clks[IMX8MN_CLK_ECSPI1] = imx8m_clk_composite("ecspi1", imx8mn_ecspi1_sels, base + 0xb280); clks[IMX8MN_CLK_ECSPI2] = imx8m_clk_composite("ecspi2", imx8mn_ecspi2_sels, base + 0xb300); clks[IMX8MN_CLK_PWM1] = imx8m_clk_composite("pwm1", imx8mn_pwm1_sels, base + 0xb380); @@ -612,6 +622,8 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) goto unregister_clks; } + imx_register_uart_clocks(uart_clks); + return 0; unregister_clks: diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c index d407a07e7e6dd2..41fc9c63356ea9 100644 --- a/drivers/clk/imx/clk-imx8mq.c +++ b/drivers/clk/imx/clk-imx8mq.c @@ -41,34 +41,34 @@ static const char * const dram_pll_out_sels[] = {"dram_pll1_ref_sel", }; /* CCM ROOT */ static const char * const imx8mq_a53_sels[] = {"osc_25m", "arm_pll_out", "sys2_pll_500m", "sys2_pll_1000m", - "sys1_pll_800m", "sys1_pll_400m", "audio_pll1_out", "sys3_pll2_out", }; + "sys1_pll_800m", "sys1_pll_400m", "audio_pll1_out", "sys3_pll_out", }; static const char * const imx8mq_arm_m4_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_250m", "sys1_pll_266m", - "sys1_pll_800m", "audio_pll1_out", "video_pll1_out", "sys3_pll2_out", }; + "sys1_pll_800m", "audio_pll1_out", "video_pll1_out", "sys3_pll_out", }; static const char * const imx8mq_vpu_sels[] = {"osc_25m", "arm_pll_out", "sys2_pll_500m", "sys2_pll_1000m", "sys1_pll_800m", "sys1_pll_400m", "audio_pll1_out", "vpu_pll_out", }; -static const char * const imx8mq_gpu_core_sels[] = {"osc_25m", "gpu_pll_out", "sys1_pll_800m", "sys3_pll2_out", +static const char * const imx8mq_gpu_core_sels[] = {"osc_25m", "gpu_pll_out", "sys1_pll_800m", "sys3_pll_out", "sys2_pll_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; -static const char * const imx8mq_gpu_shader_sels[] = {"osc_25m", "gpu_pll_out", "sys1_pll_800m", "sys3_pll2_out", +static const char * const imx8mq_gpu_shader_sels[] = {"osc_25m", "gpu_pll_out", "sys1_pll_800m", "sys3_pll_out", "sys2_pll_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; static const char * const imx8mq_main_axi_sels[] = {"osc_25m", "sys2_pll_333m", "sys1_pll_800m", "sys2_pll_250m", "sys2_pll_1000m", "audio_pll1_out", "video_pll1_out", "sys1_pll_100m",}; static const char * const imx8mq_enet_axi_sels[] = {"osc_25m", "sys1_pll_266m", "sys1_pll_800m", "sys2_pll_250m", - "sys2_pll_200m", "audio_pll1_out", "video_pll1_out", "sys3_pll2_out", }; + "sys2_pll_200m", "audio_pll1_out", "video_pll1_out", "sys3_pll_out", }; static const char * const imx8mq_nand_usdhc_sels[] = {"osc_25m", "sys1_pll_266m", "sys1_pll_800m", "sys2_pll_200m", - "sys1_pll_133m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll1_out", }; + "sys1_pll_133m", "sys3_pll_out", "sys2_pll_250m", "audio_pll1_out", }; -static const char * const imx8mq_vpu_bus_sels[] = {"osc_25m", "sys1_pll_800m", "vpu_pll_out", "audio_pll2_out", "sys3_pll2_out", "sys2_pll_1000m", "sys2_pll_200m", "sys1_pll_100m", }; +static const char * const imx8mq_vpu_bus_sels[] = {"osc_25m", "sys1_pll_800m", "vpu_pll_out", "audio_pll2_out", "sys3_pll_out", "sys2_pll_1000m", "sys2_pll_200m", "sys1_pll_100m", }; -static const char * const imx8mq_disp_axi_sels[] = {"osc_25m", "sys2_pll_125m", "sys1_pll_800m", "sys3_pll2_out", "sys1_pll_400m", "audio_pll2_out", "clk_ext1", "clk_ext4", }; +static const char * const imx8mq_disp_axi_sels[] = {"osc_25m", "sys2_pll_125m", 
"sys1_pll_800m", "sys3_pll_out", "sys1_pll_400m", "audio_pll2_out", "clk_ext1", "clk_ext4", }; -static const char * const imx8mq_disp_apb_sels[] = {"osc_25m", "sys2_pll_125m", "sys1_pll_800m", "sys3_pll2_out", +static const char * const imx8mq_disp_apb_sels[] = {"osc_25m", "sys2_pll_125m", "sys1_pll_800m", "sys3_pll_out", "sys1_pll_40m", "audio_pll2_out", "clk_ext1", "clk_ext3", }; static const char * const imx8mq_disp_rtrm_sels[] = {"osc_25m", "sys1_pll_800m", "sys2_pll_200m", "sys1_pll_400m", @@ -77,53 +77,53 @@ static const char * const imx8mq_disp_rtrm_sels[] = {"osc_25m", "sys1_pll_800m", static const char * const imx8mq_usb_bus_sels[] = {"osc_25m", "sys2_pll_500m", "sys1_pll_800m", "sys2_pll_100m", "sys2_pll_200m", "clk_ext2", "clk_ext4", "audio_pll2_out", }; -static const char * const imx8mq_gpu_axi_sels[] = {"osc_25m", "sys1_pll_800m", "gpu_pll_out", "sys3_pll2_out", "sys2_pll_1000m", +static const char * const imx8mq_gpu_axi_sels[] = {"osc_25m", "sys1_pll_800m", "gpu_pll_out", "sys3_pll_out", "sys2_pll_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; -static const char * const imx8mq_gpu_ahb_sels[] = {"osc_25m", "sys1_pll_800m", "gpu_pll_out", "sys3_pll2_out", "sys2_pll_1000m", +static const char * const imx8mq_gpu_ahb_sels[] = {"osc_25m", "sys1_pll_800m", "gpu_pll_out", "sys3_pll_out", "sys2_pll_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; -static const char * const imx8mq_noc_sels[] = {"osc_25m", "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_1000m", "sys2_pll_500m", +static const char * const imx8mq_noc_sels[] = {"osc_25m", "sys1_pll_800m", "sys3_pll_out", "sys2_pll_1000m", "sys2_pll_500m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; -static const char * const imx8mq_noc_apb_sels[] = {"osc_25m", "sys1_pll_400m", "sys3_pll2_out", "sys2_pll_333m", "sys2_pll_200m", +static const char * const imx8mq_noc_apb_sels[] = {"osc_25m", "sys1_pll_400m", "sys3_pll_out", "sys2_pll_333m", "sys2_pll_200m", "sys1_pll_800m", "audio_pll1_out", "video_pll1_out", }; static const char * const imx8mq_ahb_sels[] = {"osc_25m", "sys1_pll_133m", "sys1_pll_800m", "sys1_pll_400m", - "sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", "video_pll1_out", }; + "sys2_pll_125m", "sys3_pll_out", "audio_pll1_out", "video_pll1_out", }; static const char * const imx8mq_audio_ahb_sels[] = {"osc_25m", "sys2_pll_500m", "sys1_pll_800m", "sys2_pll_1000m", - "sys2_pll_166m", "sys3_pll2_out", "audio_pll1_out", "video_pll1_out", }; + "sys2_pll_166m", "sys3_pll_out", "audio_pll1_out", "video_pll1_out", }; static const char * const imx8mq_dsi_ahb_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m", - "sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out"}; + "sys2_pll_1000m", "sys3_pll_out", "clk_ext3", "audio_pll2_out"}; static const char * const imx8mq_dram_alt_sels[] = {"osc_25m", "sys1_pll_800m", "sys1_pll_100m", "sys2_pll_500m", "sys2_pll_250m", "sys1_pll_400m", "audio_pll1_out", "sys1_pll_266m", }; static const char * const imx8mq_dram_apb_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m", - "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", }; + "sys1_pll_800m", "sys3_pll_out", "sys2_pll_250m", "audio_pll2_out", }; -static const char * const imx8mq_vpu_g1_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_100m", "sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", }; +static const char * const imx8mq_vpu_g1_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", 
"sys1_pll_100m", "sys2_pll_125m", "sys3_pll_out", "audio_pll1_out", }; -static const char * const imx8mq_vpu_g2_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_100m", "sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", }; +static const char * const imx8mq_vpu_g2_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_100m", "sys2_pll_125m", "sys3_pll_out", "audio_pll1_out", }; -static const char * const imx8mq_disp_dtrc_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_160m", "sys2_pll_100m", "sys3_pll2_out", "audio_pll2_out", }; +static const char * const imx8mq_disp_dtrc_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_160m", "sys2_pll_100m", "sys3_pll_out", "audio_pll2_out", }; -static const char * const imx8mq_disp_dc8000_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_160m", "sys2_pll_100m", "sys3_pll2_out", "audio_pll2_out", }; +static const char * const imx8mq_disp_dc8000_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_160m", "sys2_pll_100m", "sys3_pll_out", "audio_pll2_out", }; static const char * const imx8mq_pcie1_ctrl_sels[] = {"osc_25m", "sys2_pll_250m", "sys2_pll_200m", "sys1_pll_266m", - "sys1_pll_800m", "sys2_pll_500m", "sys2_pll_250m", "sys3_pll2_out", }; + "sys1_pll_800m", "sys2_pll_500m", "sys2_pll_250m", "sys3_pll_out", }; static const char * const imx8mq_pcie1_phy_sels[] = {"osc_25m", "sys2_pll_100m", "sys2_pll_500m", "clk_ext1", "clk_ext2", "clk_ext3", "clk_ext4", }; -static const char * const imx8mq_pcie1_aux_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_500m", "sys3_pll2_out", +static const char * const imx8mq_pcie1_aux_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_500m", "sys3_pll_out", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_160m", "sys1_pll_200m", }; -static const char * const imx8mq_dc_pixel_sels[] = {"osc_25m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", "sys1_pll_800m", "sys2_pll_1000m", "sys3_pll2_out", "clk_ext4", }; +static const char * const imx8mq_dc_pixel_sels[] = {"osc_25m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", "sys1_pll_800m", "sys2_pll_1000m", "sys3_pll_out", "clk_ext4", }; -static const char * const imx8mq_lcdif_pixel_sels[] = {"osc_25m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", "sys1_pll_800m", "sys2_pll_1000m", "sys3_pll2_out", "clk_ext4", }; +static const char * const imx8mq_lcdif_pixel_sels[] = {"osc_25m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", "sys1_pll_800m", "sys2_pll_1000m", "sys3_pll_out", "clk_ext4", }; static const char * const imx8mq_sai1_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext1", "clk_ext2", }; @@ -151,40 +151,40 @@ static const char * const imx8mq_enet_phy_sels[] = {"osc_25m", "sys2_pll_50m", " "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; static const char * const imx8mq_nand_sels[] = {"osc_25m", "sys2_pll_500m", "audio_pll1_out", "sys1_pll_400m", - "audio_pll2_out", "sys3_pll2_out", "sys2_pll_250m", "video_pll1_out", }; + "audio_pll2_out", "sys3_pll_out", "sys2_pll_250m", "video_pll1_out", }; static const char * const imx8mq_qspi_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m", - "audio_pll2_out", "sys1_pll_266m", "sys3_pll2_out", "sys1_pll_100m", }; + "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", }; static const char * const imx8mq_usdhc1_sels[] = {"osc_25m", 
"sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m", - "audio_pll2_out", "sys1_pll_266m", "sys3_pll2_out", "sys1_pll_100m", }; + "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", }; static const char * const imx8mq_usdhc2_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m", - "audio_pll2_out", "sys1_pll_266m", "sys3_pll2_out", "sys1_pll_100m", }; + "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", }; -static const char * const imx8mq_i2c1_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out", +static const char * const imx8mq_i2c1_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll_out", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", }; -static const char * const imx8mq_i2c2_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out", +static const char * const imx8mq_i2c2_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll_out", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", }; -static const char * const imx8mq_i2c3_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out", +static const char * const imx8mq_i2c3_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll_out", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", }; -static const char * const imx8mq_i2c4_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out", +static const char * const imx8mq_i2c4_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll_out", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", }; static const char * const imx8mq_uart1_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m", - "sys3_pll2_out", "clk_ext2", "clk_ext4", "audio_pll2_out", }; + "sys3_pll_out", "clk_ext2", "clk_ext4", "audio_pll2_out", }; static const char * const imx8mq_uart2_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m", - "sys3_pll2_out", "clk_ext2", "clk_ext3", "audio_pll2_out", }; + "sys3_pll_out", "clk_ext2", "clk_ext3", "audio_pll2_out", }; static const char * const imx8mq_uart3_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m", - "sys3_pll2_out", "clk_ext2", "clk_ext4", "audio_pll2_out", }; + "sys3_pll_out", "clk_ext2", "clk_ext4", "audio_pll2_out", }; static const char * const imx8mq_uart4_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m", - "sys3_pll2_out", "clk_ext2", "clk_ext3", "audio_pll2_out", }; + "sys3_pll_out", "clk_ext2", "clk_ext3", "audio_pll2_out", }; static const char * const imx8mq_usb_core_sels[] = {"osc_25m", "sys1_pll_100m", "sys1_pll_40m", "sys2_pll_100m", "sys2_pll_200m", "clk_ext2", "clk_ext3", "audio_pll2_out", }; @@ -196,79 +196,79 @@ static const char * const imx8mq_gic_sels[] = {"osc_25m", "sys2_pll_200m", "sys1 "sys2_pll_200m", "clk_ext2", "clk_ext3", "audio_pll2_out" }; static const char * const imx8mq_ecspi1_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m", - "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", }; + "sys1_pll_800m", "sys3_pll_out", "sys2_pll_250m", "audio_pll2_out", }; static const char * const imx8mq_ecspi2_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m", - "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", }; + "sys1_pll_800m", "sys3_pll_out", "sys2_pll_250m", "audio_pll2_out", }; static const char * const imx8mq_pwm1_sels[] = 
{"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m", - "sys3_pll2_out", "clk_ext1", "sys1_pll_80m", "video_pll1_out", }; + "sys3_pll_out", "clk_ext1", "sys1_pll_80m", "video_pll1_out", }; static const char * const imx8mq_pwm2_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m", - "sys3_pll2_out", "clk_ext1", "sys1_pll_80m", "video_pll1_out", }; + "sys3_pll_out", "clk_ext1", "sys1_pll_80m", "video_pll1_out", }; static const char * const imx8mq_pwm3_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m", - "sys3_pll2_out", "clk_ext2", "sys1_pll_80m", "video_pll1_out", }; + "sys3_pll_out", "clk_ext2", "sys1_pll_80m", "video_pll1_out", }; static const char * const imx8mq_pwm4_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m", - "sys3_pll2_out", "clk_ext2", "sys1_pll_80m", "video_pll1_out", }; + "sys3_pll_out", "clk_ext2", "sys1_pll_80m", "video_pll1_out", }; static const char * const imx8mq_gpt1_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_400m", "sys1_pll_40m", "sys1_pll_80m", "audio_pll1_out", "clk_ext1", }; static const char * const imx8mq_wdog_sels[] = {"osc_25m", "sys1_pll_133m", "sys1_pll_160m", "vpu_pll_out", - "sys2_pll_125m", "sys3_pll2_out", "sys1_pll_80m", "sys2_pll_166m", }; + "sys2_pll_125m", "sys3_pll_out", "sys1_pll_80m", "sys2_pll_166m", }; -static const char * const imx8mq_wrclk_sels[] = {"osc_25m", "sys1_pll_40m", "vpu_pll_out", "sys3_pll2_out", "sys2_pll_200m", +static const char * const imx8mq_wrclk_sels[] = {"osc_25m", "sys1_pll_40m", "vpu_pll_out", "sys3_pll_out", "sys2_pll_200m", "sys1_pll_266m", "sys2_pll_500m", "sys1_pll_100m", }; static const char * const imx8mq_dsi_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m", - "sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", }; + "sys2_pll_1000m", "sys3_pll_out", "audio_pll2_out", "video_pll1_out", }; static const char * const imx8mq_dsi_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m", "sys2_pll_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", }; static const char * const imx8mq_dsi_dbi_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_100m", "sys1_pll_800m", - "sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", }; + "sys2_pll_1000m", "sys3_pll_out", "audio_pll2_out", "video_pll1_out", }; static const char * const imx8mq_dsi_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m", - "sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out", }; + "sys2_pll_1000m", "sys3_pll_out", "clk_ext3", "audio_pll2_out", }; static const char * const imx8mq_csi1_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m", - "sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", }; + "sys2_pll_1000m", "sys3_pll_out", "audio_pll2_out", "video_pll1_out", }; static const char * const imx8mq_csi1_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m", "sys2_pll_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", }; static const char * const imx8mq_csi1_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m", - "sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out", }; + "sys2_pll_1000m", "sys3_pll_out", "clk_ext3", "audio_pll2_out", }; static const char * const imx8mq_csi2_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m", - "sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", }; + "sys2_pll_1000m", "sys3_pll_out", "audio_pll2_out", 
"video_pll1_out", }; static const char * const imx8mq_csi2_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m", "sys2_pll_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", }; static const char * const imx8mq_csi2_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m", - "sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out", }; + "sys2_pll_1000m", "sys3_pll_out", "clk_ext3", "audio_pll2_out", }; static const char * const imx8mq_pcie2_ctrl_sels[] = {"osc_25m", "sys2_pll_250m", "sys2_pll_200m", "sys1_pll_266m", - "sys1_pll_800m", "sys2_pll_500m", "sys2_pll_333m", "sys3_pll2_out", }; + "sys1_pll_800m", "sys2_pll_500m", "sys2_pll_333m", "sys3_pll_out", }; static const char * const imx8mq_pcie2_phy_sels[] = {"osc_25m", "sys2_pll_100m", "sys2_pll_500m", "clk_ext1", "clk_ext2", "clk_ext3", "clk_ext4", "sys1_pll_400m", }; -static const char * const imx8mq_pcie2_aux_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_50m", "sys3_pll2_out", +static const char * const imx8mq_pcie2_aux_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_50m", "sys3_pll_out", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_160m", "sys1_pll_200m", }; static const char * const imx8mq_ecspi3_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m", - "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", }; + "sys1_pll_800m", "sys3_pll_out", "sys2_pll_250m", "audio_pll2_out", }; static const char * const imx8mq_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", }; static const char * const imx8mq_clko1_sels[] = {"osc_25m", "sys1_pll_800m", "osc_27m", "sys1_pll_200m", "audio_pll2_out", "sys2_pll_500m", "vpu_pll_out", "sys1_pll_80m", }; static const char * const imx8mq_clko2_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_400m", "sys2_pll_166m", - "sys3_pll2_out", "audio_pll1_out", "video_pll1_out", "ckil", }; + "sys3_pll_out", "audio_pll1_out", "video_pll1_out", "ckil", }; static struct clk_onecell_data clk_data; @@ -406,7 +406,8 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) clks[IMX8MQ_CLK_NOC_APB] = imx8m_clk_composite_critical("noc_apb", imx8mq_noc_apb_sels, base + 0x8d80); /* AHB */ - clks[IMX8MQ_CLK_AHB] = imx8m_clk_composite("ahb", imx8mq_ahb_sels, base + 0x9000); + /* AHB clock is used by the AHB bus therefore marked as critical */ + clks[IMX8MQ_CLK_AHB] = imx8m_clk_composite_critical("ahb", imx8mq_ahb_sels, base + 0x9000); clks[IMX8MQ_CLK_AUDIO_AHB] = imx8m_clk_composite("audio_ahb", imx8mq_audio_ahb_sels, base + 0x9100); /* IPG */ @@ -523,8 +524,8 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) clks[IMX8MQ_CLK_UART2_ROOT] = imx_clk_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0); clks[IMX8MQ_CLK_UART3_ROOT] = imx_clk_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0); clks[IMX8MQ_CLK_UART4_ROOT] = imx_clk_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0); - clks[IMX8MQ_CLK_USB1_CTRL_ROOT] = imx_clk_gate4("usb1_ctrl_root_clk", "usb_core_ref", base + 0x44d0, 0); - clks[IMX8MQ_CLK_USB2_CTRL_ROOT] = imx_clk_gate4("usb2_ctrl_root_clk", "usb_core_ref", base + 0x44e0, 0); + clks[IMX8MQ_CLK_USB1_CTRL_ROOT] = imx_clk_gate4("usb1_ctrl_root_clk", "usb_bus", base + 0x44d0, 0); + clks[IMX8MQ_CLK_USB2_CTRL_ROOT] = imx_clk_gate4("usb2_ctrl_root_clk", "usb_bus", base + 0x44e0, 0); clks[IMX8MQ_CLK_USB1_PHY_ROOT] = imx_clk_gate4("usb1_phy_root_clk", "usb_phy_ref", base + 0x44f0, 0); clks[IMX8MQ_CLK_USB2_PHY_ROOT] = imx_clk_gate4("usb2_phy_root_clk", "usb_phy_ref", base + 0x4500, 0); clks[IMX8MQ_CLK_USDHC1_ROOT] = 
imx_clk_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0); @@ -539,7 +540,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) clks[IMX8MQ_CLK_DISP_AXI_ROOT] = imx_clk_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_dcss); clks[IMX8MQ_CLK_DISP_APB_ROOT] = imx_clk_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_dcss); clks[IMX8MQ_CLK_DISP_RTRM_ROOT] = imx_clk_gate2_shared2("disp_rtrm_root_clk", "disp_rtrm", base + 0x45d0, 0, &share_count_dcss); - clks[IMX8MQ_CLK_TMU_ROOT] = imx_clk_gate4_flags("tmu_root_clk", "ipg_root", base + 0x4620, 0, CLK_IS_CRITICAL); + clks[IMX8MQ_CLK_TMU_ROOT] = imx_clk_gate4("tmu_root_clk", "ipg_root", base + 0x4620, 0); clks[IMX8MQ_CLK_VPU_DEC_ROOT] = imx_clk_gate2_flags("vpu_dec_root_clk", "vpu_bus", base + 0x4630, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE); clks[IMX8MQ_CLK_CSI1_ROOT] = imx_clk_gate4("csi1_root_clk", "csi1_core", base + 0x4650, 0); clks[IMX8MQ_CLK_CSI2_ROOT] = imx_clk_gate4("csi2_root_clk", "csi2_core", base + 0x4660, 0); @@ -561,10 +562,18 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) clk_data.clk_num = ARRAY_SIZE(clks); err = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); - WARN_ON(err); + if (err < 0) { + dev_err(dev, "failed to register clks for i.MX8MQ\n"); + goto unregister_clks; + } imx_register_uart_clocks(uart_clks); + return 0; + +unregister_clks: + imx_unregister_clocks(clks, ARRAY_SIZE(clks)); + return err; } diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c index b7213023b238fd..7a815ec76aa5c8 100644 --- a/drivers/clk/imx/clk-pll14xx.c +++ b/drivers/clk/imx/clk-pll14xx.c @@ -191,6 +191,10 @@ static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate, tmp &= ~RST_MASK; writel_relaxed(tmp, pll->base); + /* Enable BYPASS */ + tmp |= BYPASS_MASK; + writel(tmp, pll->base); + div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) | (rate->sdiv << SDIV_SHIFT); writel_relaxed(div_val, pll->base + 0x4); @@ -250,6 +254,10 @@ static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate, tmp &= ~RST_MASK; writel_relaxed(tmp, pll->base); + /* Enable BYPASS */ + tmp |= BYPASS_MASK; + writel_relaxed(tmp, pll->base); + div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) | (rate->sdiv << SDIV_SHIFT); writel_relaxed(div_val, pll->base + 0x4); @@ -283,16 +291,28 @@ static int clk_pll14xx_prepare(struct clk_hw *hw) { struct clk_pll14xx *pll = to_clk_pll14xx(hw); u32 val; + int ret; /* * RESETB = 1 from 0, PLL starts its normal * operation after lock time */ val = readl_relaxed(pll->base + GNRL_CTL); + if (val & RST_MASK) + return 0; + val |= BYPASS_MASK; + writel_relaxed(val, pll->base + GNRL_CTL); val |= RST_MASK; writel_relaxed(val, pll->base + GNRL_CTL); - return clk_pll14xx_wait_lock(pll); + ret = clk_pll14xx_wait_lock(pll); + if (ret) + return ret; + + val &= ~BYPASS_MASK; + writel_relaxed(val, pll->base + GNRL_CTL); + + return 0; } static int clk_pll14xx_is_prepared(struct clk_hw *hw) @@ -348,6 +368,7 @@ struct clk *imx_clk_pll14xx(const char *name, const char *parent_name, struct clk_pll14xx *pll; struct clk *clk; struct clk_init_data init; + u32 val; pll = kzalloc(sizeof(*pll), GFP_KERNEL); if (!pll) @@ -379,6 +400,10 @@ struct clk *imx_clk_pll14xx(const char *name, const char *parent_name, pll->rate_table = pll_clk->rate_table; pll->rate_count = pll_clk->rate_count; + val = readl_relaxed(pll->base + GNRL_CTL); + val &= ~BYPASS_MASK; + writel_relaxed(val, 
pll->base + GNRL_CTL); + clk = clk_register(NULL, &pll->hw); if (IS_ERR(clk)) { pr_err("%s: failed to register pll %s %lu\n", diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h index bb4ec1b33fafea..f7a389a50401ab 100644 --- a/drivers/clk/imx/clk.h +++ b/drivers/clk/imx/clk.h @@ -10,7 +10,6 @@ extern spinlock_t imx_ccm_lock; void imx_check_clocks(struct clk *clks[], unsigned int count); void imx_check_clk_hws(struct clk_hw *clks[], unsigned int count); void imx_register_uart_clocks(struct clk ** const clks[]); -void imx_register_uart_clocks_hws(struct clk_hw ** const hws[]); void imx_mmdc_mask_handshake(void __iomem *ccm_base, unsigned int chn); void imx_unregister_clocks(struct clk *clks[], unsigned int count); @@ -51,12 +50,6 @@ struct imx_pll14xx_clk { int flags; }; -#define imx_clk_busy_divider(name, parent_name, reg, shift, width, busy_reg, busy_shift) \ - imx_clk_hw_busy_divider(name, parent_name, reg, shift, width, busy_reg, busy_shift)->clk - -#define imx_clk_busy_mux(name, reg, shift, width, busy_reg, busy_shift, parent_names, num_parents) \ - imx_clk_hw_busy_mux(name, reg, shift, width, busy_reg, busy_shift, parent_names, num_parents)->clk - #define imx_clk_cpu(name, parent_name, div, mux, pll, step) \ imx_clk_hw_cpu(name, parent_name, div, mux, pll, step)->clk @@ -74,15 +67,6 @@ struct imx_pll14xx_clk { #define imx_clk_gate_exclusive(name, parent, reg, shift, exclusive_mask) \ imx_clk_hw_gate_exclusive(name, parent, reg, shift, exclusive_mask)->clk -#define imx_clk_fixup_divider(name, parent, reg, shift, width, fixup) \ - imx_clk_hw_fixup_divider(name, parent, reg, shift, width, fixup)->clk - -#define imx_clk_fixup_mux(name, reg, shift, width, parents, num_parents, fixup) \ - imx_clk_hw_fixup_mux(name, reg, shift, width, parents, num_parents, fixup)->clk - -#define imx_clk_mux_ldb(name, reg, shift, width, parents, num_parents) \ - imx_clk_hw_mux_ldb(name, reg, shift, width, parents, num_parents)->clk - #define imx_clk_fixed_factor(name, parent, mult, div) \ imx_clk_hw_fixed_factor(name, parent, mult, div)->clk @@ -92,21 +76,12 @@ struct imx_pll14xx_clk { #define imx_clk_gate_dis(name, parent, reg, shift) \ imx_clk_hw_gate_dis(name, parent, reg, shift)->clk -#define imx_clk_gate_dis_flags(name, parent, reg, shift, flags) \ - imx_clk_hw_gate_dis_flags(name, parent, reg, shift, flags)->clk - -#define imx_clk_gate_flags(name, parent, reg, shift, flags) \ - imx_clk_hw_gate_flags(name, parent, reg, shift, flags)->clk - #define imx_clk_gate2(name, parent, reg, shift) \ imx_clk_hw_gate2(name, parent, reg, shift)->clk #define imx_clk_gate2_flags(name, parent, reg, shift, flags) \ imx_clk_hw_gate2_flags(name, parent, reg, shift, flags)->clk -#define imx_clk_gate2_shared(name, parent, reg, shift, share_count) \ - imx_clk_hw_gate2_shared(name, parent, reg, shift, share_count)->clk - #define imx_clk_gate2_shared2(name, parent, reg, shift, share_count) \ imx_clk_hw_gate2_shared2(name, parent, reg, shift, share_count)->clk diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c index 2642d36d1e2cb6..a3b4635f627846 100644 --- a/drivers/clk/ingenic/jz4725b-cgu.c +++ b/drivers/clk/ingenic/jz4725b-cgu.c @@ -257,4 +257,4 @@ static void __init jz4725b_cgu_init(struct device_node *np) ingenic_cgu_register_syscore_ops(cgu); } -CLK_OF_DECLARE(jz4725b_cgu, "ingenic,jz4725b-cgu", jz4725b_cgu_init); +CLK_OF_DECLARE_DRIVER(jz4725b_cgu, "ingenic,jz4725b-cgu", jz4725b_cgu_init); diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c index 
4c0a20949c2c27..978f32dd424a6b 100644 --- a/drivers/clk/ingenic/jz4740-cgu.c +++ b/drivers/clk/ingenic/jz4740-cgu.c @@ -53,6 +53,10 @@ static const u8 jz4740_cgu_cpccr_div_table[] = { 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, }; +static const u8 jz4740_cgu_pll_half_div_table[] = { + 2, 1, +}; + static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = { /* External clocks */ @@ -86,7 +90,10 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = { [JZ4740_CLK_PLL_HALF] = { "pll half", CGU_CLK_DIV, .parents = { JZ4740_CLK_PLL, -1, -1, -1 }, - .div = { CGU_REG_CPCCR, 21, 1, 1, -1, -1, -1 }, + .div = { + CGU_REG_CPCCR, 21, 1, 1, -1, -1, -1, + jz4740_cgu_pll_half_div_table, + }, }, [JZ4740_CLK_CCLK] = { @@ -241,4 +248,4 @@ static void __init jz4740_cgu_init(struct device_node *np) ingenic_cgu_register_syscore_ops(cgu); } -CLK_OF_DECLARE(jz4740_cgu, "ingenic,jz4740-cgu", jz4740_cgu_init); +CLK_OF_DECLARE_DRIVER(jz4740_cgu, "ingenic,jz4740-cgu", jz4740_cgu_init); diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c index eebc1bea3841ea..956dd653a43d22 100644 --- a/drivers/clk/ingenic/jz4770-cgu.c +++ b/drivers/clk/ingenic/jz4770-cgu.c @@ -443,4 +443,4 @@ static void __init jz4770_cgu_init(struct device_node *np) } /* We only probe via devicetree, no need for a platform driver */ -CLK_OF_DECLARE(jz4770_cgu, "ingenic,jz4770-cgu", jz4770_cgu_init); +CLK_OF_DECLARE_DRIVER(jz4770_cgu, "ingenic,jz4770-cgu", jz4770_cgu_init); diff --git a/drivers/clk/ingenic/jz4780-cgu.c b/drivers/clk/ingenic/jz4780-cgu.c index 8c67f89df25e54..ea905ff72bf034 100644 --- a/drivers/clk/ingenic/jz4780-cgu.c +++ b/drivers/clk/ingenic/jz4780-cgu.c @@ -725,4 +725,4 @@ static void __init jz4780_cgu_init(struct device_node *np) ingenic_cgu_register_syscore_ops(cgu); } -CLK_OF_DECLARE(jz4780_cgu, "ingenic,jz4780-cgu", jz4780_cgu_init); +CLK_OF_DECLARE_DRIVER(jz4780_cgu, "ingenic,jz4780-cgu", jz4780_cgu_init); diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig index ce3d9b300bab57..7efc3617bbd553 100644 --- a/drivers/clk/mediatek/Kconfig +++ b/drivers/clk/mediatek/Kconfig @@ -117,6 +117,62 @@ config COMMON_CLK_MT2712_VENCSYS ---help--- This driver supports MediaTek MT2712 vencsys clocks. +config COMMON_CLK_MT6779 + bool "Clock driver for MediaTek MT6779" + depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST + select COMMON_CLK_MEDIATEK + default ARCH_MEDIATEK && ARM64 + help + This driver supports MediaTek MT6779 basic clocks. + +config COMMON_CLK_MT6779_MMSYS + bool "Clock driver for MediaTek MT6779 mmsys" + depends on COMMON_CLK_MT6779 + help + This driver supports MediaTek MT6779 mmsys clocks. + +config COMMON_CLK_MT6779_IMGSYS + bool "Clock driver for MediaTek MT6779 imgsys" + depends on COMMON_CLK_MT6779 + help + This driver supports MediaTek MT6779 imgsys clocks. + +config COMMON_CLK_MT6779_IPESYS + bool "Clock driver for MediaTek MT6779 ipesys" + depends on COMMON_CLK_MT6779 + help + This driver supports MediaTek MT6779 ipesys clocks. + +config COMMON_CLK_MT6779_CAMSYS + bool "Clock driver for MediaTek MT6779 camsys" + depends on COMMON_CLK_MT6779 + help + This driver supports MediaTek MT6779 camsys clocks. + +config COMMON_CLK_MT6779_VDECSYS + bool "Clock driver for MediaTek MT6779 vdecsys" + depends on COMMON_CLK_MT6779 + help + This driver supports MediaTek MT6779 vdecsys clocks. + +config COMMON_CLK_MT6779_VENCSYS + bool "Clock driver for MediaTek MT6779 vencsys" + depends on COMMON_CLK_MT6779 + help + This driver supports MediaTek MT6779 vencsys clocks. 
+ +config COMMON_CLK_MT6779_MFGCFG + bool "Clock driver for MediaTek MT6779 mfgcfg" + depends on COMMON_CLK_MT6779 + help + This driver supports MediaTek MT6779 mfgcfg clocks. + +config COMMON_CLK_MT6779_AUDSYS + bool "Clock driver for Mediatek MT6779 audsys" + depends on COMMON_CLK_MT6779 + help + This driver supports Mediatek MT6779 audsys clocks. + config COMMON_CLK_MT6797 bool "Clock driver for MediaTek MT6797" depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile index 672de0099eef83..8cdb76a5cd713f 100644 --- a/drivers/clk/mediatek/Makefile +++ b/drivers/clk/mediatek/Makefile @@ -1,6 +1,15 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_COMMON_CLK_MEDIATEK) += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o clk-cpumux.o reset.o clk-mux.o +obj-$(CONFIG_COMMON_CLK_MT6779) += clk-mt6779.o +obj-$(CONFIG_COMMON_CLK_MT6779_MMSYS) += clk-mt6779-mm.o +obj-$(CONFIG_COMMON_CLK_MT6779_IMGSYS) += clk-mt6779-img.o +obj-$(CONFIG_COMMON_CLK_MT6779_IPESYS) += clk-mt6779-ipe.o +obj-$(CONFIG_COMMON_CLK_MT6779_CAMSYS) += clk-mt6779-cam.o +obj-$(CONFIG_COMMON_CLK_MT6779_VDECSYS) += clk-mt6779-vdec.o +obj-$(CONFIG_COMMON_CLK_MT6779_VENCSYS) += clk-mt6779-venc.o +obj-$(CONFIG_COMMON_CLK_MT6779_MFGCFG) += clk-mt6779-mfg.o +obj-$(CONFIG_COMMON_CLK_MT6779_AUDSYS) += clk-mt6779-aud.o obj-$(CONFIG_COMMON_CLK_MT6797) += clk-mt6797.o obj-$(CONFIG_COMMON_CLK_MT6797_IMGSYS) += clk-mt6797-img.o obj-$(CONFIG_COMMON_CLK_MT6797_MMSYS) += clk-mt6797-mm.o diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c index 803bf0ae1fd65a..a35cf0b2215011 100644 --- a/drivers/clk/mediatek/clk-gate.c +++ b/drivers/clk/mediatek/clk-gate.c @@ -150,7 +150,8 @@ struct clk *mtk_clk_register_gate( int sta_ofs, u8 bit, const struct clk_ops *ops, - unsigned long flags) + unsigned long flags, + struct device *dev) { struct mtk_clk_gate *cg; struct clk *clk; @@ -174,7 +175,7 @@ struct clk *mtk_clk_register_gate( cg->hw.init = &init; - clk = clk_register(NULL, &cg->hw); + clk = clk_register(dev, &cg->hw); if (IS_ERR(clk)) kfree(cg); diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h index e05c736974859d..3c3329ec54b7af 100644 --- a/drivers/clk/mediatek/clk-gate.h +++ b/drivers/clk/mediatek/clk-gate.h @@ -40,7 +40,8 @@ struct clk *mtk_clk_register_gate( int sta_ofs, u8 bit, const struct clk_ops *ops, - unsigned long flags); + unsigned long flags, + struct device *dev); #define GATE_MTK_FLAGS(_id, _name, _parent, _regs, _shift, \ _ops, _flags) { \ diff --git a/drivers/clk/mediatek/clk-mt6779-aud.c b/drivers/clk/mediatek/clk-mt6779-aud.c new file mode 100644 index 00000000000000..11b209f95e2547 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6779-aud.c @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019 MediaTek Inc. 
+ * Author: Wendell Lin + */ + +#include +#include +#include +#include +#include + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include + +static const struct mtk_gate_regs audio0_cg_regs = { + .set_ofs = 0x0, + .clr_ofs = 0x0, + .sta_ofs = 0x0, +}; + +static const struct mtk_gate_regs audio1_cg_regs = { + .set_ofs = 0x4, + .clr_ofs = 0x4, + .sta_ofs = 0x4, +}; + +#define GATE_AUDIO0(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &audio0_cg_regs, _shift, \ + &mtk_clk_gate_ops_no_setclr) +#define GATE_AUDIO1(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &audio1_cg_regs, _shift, \ + &mtk_clk_gate_ops_no_setclr) + +static const struct mtk_gate audio_clks[] = { + /* AUDIO0 */ + GATE_AUDIO0(CLK_AUD_AFE, "aud_afe", "audio_sel", 2), + GATE_AUDIO0(CLK_AUD_22M, "aud_22m", "aud_eng1_sel", 8), + GATE_AUDIO0(CLK_AUD_24M, "aud_24m", "aud_eng2_sel", 9), + GATE_AUDIO0(CLK_AUD_APLL2_TUNER, "aud_apll2_tuner", + "aud_eng2_sel", 18), + GATE_AUDIO0(CLK_AUD_APLL_TUNER, "aud_apll_tuner", + "aud_eng1_sel", 19), + GATE_AUDIO0(CLK_AUD_TDM, "aud_tdm", "aud_eng1_sel", 20), + GATE_AUDIO0(CLK_AUD_ADC, "aud_adc", "audio_sel", 24), + GATE_AUDIO0(CLK_AUD_DAC, "aud_dac", "audio_sel", 25), + GATE_AUDIO0(CLK_AUD_DAC_PREDIS, "aud_dac_predis", + "audio_sel", 26), + GATE_AUDIO0(CLK_AUD_TML, "aud_tml", "audio_sel", 27), + GATE_AUDIO0(CLK_AUD_NLE, "aud_nle", "audio_sel", 28), + /* AUDIO1 */ + GATE_AUDIO1(CLK_AUD_I2S1_BCLK_SW, "aud_i2s1_bclk", + "audio_sel", 4), + GATE_AUDIO1(CLK_AUD_I2S2_BCLK_SW, "aud_i2s2_bclk", + "audio_sel", 5), + GATE_AUDIO1(CLK_AUD_I2S3_BCLK_SW, "aud_i2s3_bclk", + "audio_sel", 6), + GATE_AUDIO1(CLK_AUD_I2S4_BCLK_SW, "aud_i2s4_bclk", + "audio_sel", 7), + GATE_AUDIO1(CLK_AUD_I2S5_BCLK_SW, "aud_i2s5_bclk", + "audio_sel", 8), + GATE_AUDIO1(CLK_AUD_CONN_I2S_ASRC, "aud_conn_i2s", + "audio_sel", 12), + GATE_AUDIO1(CLK_AUD_GENERAL1_ASRC, "aud_general1", + "audio_sel", 13), + GATE_AUDIO1(CLK_AUD_GENERAL2_ASRC, "aud_general2", + "audio_sel", 14), + GATE_AUDIO1(CLK_AUD_DAC_HIRES, "aud_dac_hires", + "audio_h_sel", 15), + GATE_AUDIO1(CLK_AUD_ADC_HIRES, "aud_adc_hires", + "audio_h_sel", 16), + GATE_AUDIO1(CLK_AUD_ADC_HIRES_TML, "aud_adc_hires_tml", + "audio_h_sel", 17), + GATE_AUDIO1(CLK_AUD_PDN_ADDA6_ADC, "aud_pdn_adda6_adc", + "audio_sel", 20), + GATE_AUDIO1(CLK_AUD_ADDA6_ADC_HIRES, "aud_adda6_adc_hires", + "audio_h_sel", + 21), + GATE_AUDIO1(CLK_AUD_3RD_DAC, "aud_3rd_dac", "audio_sel", + 28), + GATE_AUDIO1(CLK_AUD_3RD_DAC_PREDIS, "aud_3rd_dac_predis", + "audio_sel", 29), + GATE_AUDIO1(CLK_AUD_3RD_DAC_TML, "aud_3rd_dac_tml", + "audio_sel", 30), + GATE_AUDIO1(CLK_AUD_3RD_DAC_HIRES, "aud_3rd_dac_hires", + "audio_h_sel", 31), +}; + +static const struct of_device_id of_match_clk_mt6779_aud[] = { + { .compatible = "mediatek,mt6779-audio", }, + {} +}; + +static int clk_mt6779_aud_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_AUD_NR_CLK); + + mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks), + clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static struct platform_driver clk_mt6779_aud_drv = { + .probe = clk_mt6779_aud_probe, + .driver = { + .name = "clk-mt6779-aud", + .of_match_table = of_match_clk_mt6779_aud, + }, +}; + +builtin_platform_driver(clk_mt6779_aud_drv); diff --git a/drivers/clk/mediatek/clk-mt6779-cam.c b/drivers/clk/mediatek/clk-mt6779-cam.c new file mode 100644 index 00000000000000..244d4208b7fb3e --- 
/dev/null +++ b/drivers/clk/mediatek/clk-mt6779-cam.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019 MediaTek Inc. + * Author: Wendell Lin + */ + +#include +#include +#include + +#include "clk-mtk.h" +#include "clk-gate.h" + +static const struct mtk_gate_regs cam_cg_regs = { + .set_ofs = 0x0004, + .clr_ofs = 0x0008, + .sta_ofs = 0x0000, +}; + +#define GATE_CAM(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &cam_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr) + +static const struct mtk_gate cam_clks[] = { + GATE_CAM(CLK_CAM_LARB10, "camsys_larb10", "cam_sel", 0), + GATE_CAM(CLK_CAM_DFP_VAD, "camsys_dfp_vad", "cam_sel", 1), + GATE_CAM(CLK_CAM_LARB11, "camsys_larb11", "cam_sel", 2), + GATE_CAM(CLK_CAM_LARB9, "camsys_larb9", "cam_sel", 3), + GATE_CAM(CLK_CAM_CAM, "camsys_cam", "cam_sel", 6), + GATE_CAM(CLK_CAM_CAMTG, "camsys_camtg", "cam_sel", 7), + GATE_CAM(CLK_CAM_SENINF, "camsys_seninf", "cam_sel", 8), + GATE_CAM(CLK_CAM_CAMSV0, "camsys_camsv0", "cam_sel", 9), + GATE_CAM(CLK_CAM_CAMSV1, "camsys_camsv1", "cam_sel", 10), + GATE_CAM(CLK_CAM_CAMSV2, "camsys_camsv2", "cam_sel", 11), + GATE_CAM(CLK_CAM_CAMSV3, "camsys_camsv3", "cam_sel", 12), + GATE_CAM(CLK_CAM_CCU, "camsys_ccu", "cam_sel", 13), + GATE_CAM(CLK_CAM_FAKE_ENG, "camsys_fake_eng", "cam_sel", 14), +}; + +static const struct of_device_id of_match_clk_mt6779_cam[] = { + { .compatible = "mediatek,mt6779-camsys", }, + {} +}; + +static int clk_mt6779_cam_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_CAM_NR_CLK); + + mtk_clk_register_gates(node, cam_clks, ARRAY_SIZE(cam_clks), + clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static struct platform_driver clk_mt6779_cam_drv = { + .probe = clk_mt6779_cam_probe, + .driver = { + .name = "clk-mt6779-cam", + .of_match_table = of_match_clk_mt6779_cam, + }, +}; + +builtin_platform_driver(clk_mt6779_cam_drv); diff --git a/drivers/clk/mediatek/clk-mt6779-img.c b/drivers/clk/mediatek/clk-mt6779-img.c new file mode 100644 index 00000000000000..26292a45c6133b --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6779-img.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019 MediaTek Inc. 
+ * Author: Wendell Lin + */ + +#include +#include +#include + +#include "clk-mtk.h" +#include "clk-gate.h" + +static const struct mtk_gate_regs img_cg_regs = { + .set_ofs = 0x0004, + .clr_ofs = 0x0008, + .sta_ofs = 0x0000, +}; + +#define GATE_IMG(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr) + +static const struct mtk_gate img_clks[] = { + GATE_IMG(CLK_IMG_LARB5, "imgsys_larb5", "img_sel", 0), + GATE_IMG(CLK_IMG_LARB6, "imgsys_larb6", "img_sel", 1), + GATE_IMG(CLK_IMG_DIP, "imgsys_dip", "img_sel", 2), + GATE_IMG(CLK_IMG_MFB, "imgsys_mfb", "img_sel", 6), + GATE_IMG(CLK_IMG_WPE_A, "imgsys_wpe_a", "img_sel", 7), +}; + +static const struct of_device_id of_match_clk_mt6779_img[] = { + { .compatible = "mediatek,mt6779-imgsys", }, + {} +}; + +static int clk_mt6779_img_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK); + + mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks), + clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static struct platform_driver clk_mt6779_img_drv = { + .probe = clk_mt6779_img_probe, + .driver = { + .name = "clk-mt6779-img", + .of_match_table = of_match_clk_mt6779_img, + }, +}; + +builtin_platform_driver(clk_mt6779_img_drv); diff --git a/drivers/clk/mediatek/clk-mt6779-ipe.c b/drivers/clk/mediatek/clk-mt6779-ipe.c new file mode 100644 index 00000000000000..bb519075639cb8 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6779-ipe.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019 MediaTek Inc. + * Author: Wendell Lin + */ + +#include +#include +#include + +#include "clk-mtk.h" +#include "clk-gate.h" + +static const struct mtk_gate_regs ipe_cg_regs = { + .set_ofs = 0x0004, + .clr_ofs = 0x0008, + .sta_ofs = 0x0000, +}; + +#define GATE_IPE(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &ipe_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr) + +static const struct mtk_gate ipe_clks[] = { + GATE_IPE(CLK_IPE_LARB7, "ipe_larb7", "ipe_sel", 0), + GATE_IPE(CLK_IPE_LARB8, "ipe_larb8", "ipe_sel", 1), + GATE_IPE(CLK_IPE_SMI_SUBCOM, "ipe_smi_subcom", "ipe_sel", 2), + GATE_IPE(CLK_IPE_FD, "ipe_fd", "ipe_sel", 3), + GATE_IPE(CLK_IPE_FE, "ipe_fe", "ipe_sel", 4), + GATE_IPE(CLK_IPE_RSC, "ipe_rsc", "ipe_sel", 5), + GATE_IPE(CLK_IPE_DPE, "ipe_dpe", "ipe_sel", 6), +}; + +static const struct of_device_id of_match_clk_mt6779_ipe[] = { + { .compatible = "mediatek,mt6779-ipesys", }, + {} +}; + +static int clk_mt6779_ipe_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_IPE_NR_CLK); + + mtk_clk_register_gates(node, ipe_clks, ARRAY_SIZE(ipe_clks), + clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static struct platform_driver clk_mt6779_ipe_drv = { + .probe = clk_mt6779_ipe_probe, + .driver = { + .name = "clk-mt6779-ipe", + .of_match_table = of_match_clk_mt6779_ipe, + }, +}; + +builtin_platform_driver(clk_mt6779_ipe_drv); diff --git a/drivers/clk/mediatek/clk-mt6779-mfg.c b/drivers/clk/mediatek/clk-mt6779-mfg.c new file mode 100644 index 00000000000000..c6ee2a89c07077 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6779-mfg.c @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019 MediaTek Inc. 
+ * Author: Wendell Lin + */ + +#include +#include + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include + +static const struct mtk_gate_regs mfg_cg_regs = { + .set_ofs = 0x4, + .clr_ofs = 0x8, + .sta_ofs = 0x0, +}; + +#define GATE_MFG(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr) + +static const struct mtk_gate mfg_clks[] = { + GATE_MFG(CLK_MFGCFG_BG3D, "mfg_bg3d", "mfg_sel", 0), +}; + +static int clk_mt6779_mfg_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_MFGCFG_NR_CLK); + + mtk_clk_register_gates(node, mfg_clks, ARRAY_SIZE(mfg_clks), + clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static const struct of_device_id of_match_clk_mt6779_mfg[] = { + { .compatible = "mediatek,mt6779-mfgcfg", }, + {} +}; + +static struct platform_driver clk_mt6779_mfg_drv = { + .probe = clk_mt6779_mfg_probe, + .driver = { + .name = "clk-mt6779-mfg", + .of_match_table = of_match_clk_mt6779_mfg, + }, +}; + +builtin_platform_driver(clk_mt6779_mfg_drv); diff --git a/drivers/clk/mediatek/clk-mt6779-mm.c b/drivers/clk/mediatek/clk-mt6779-mm.c new file mode 100644 index 00000000000000..fb5fbb8e3e418b --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6779-mm.c @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019 MediaTek Inc. + * Author: Wendell Lin + */ + +#include +#include +#include + +#include "clk-mtk.h" +#include "clk-gate.h" + +static const struct mtk_gate_regs mm0_cg_regs = { + .set_ofs = 0x0104, + .clr_ofs = 0x0108, + .sta_ofs = 0x0100, +}; + +static const struct mtk_gate_regs mm1_cg_regs = { + .set_ofs = 0x0114, + .clr_ofs = 0x0118, + .sta_ofs = 0x0110, +}; + +#define GATE_MM0(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr) +#define GATE_MM1(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr) + +static const struct mtk_gate mm_clks[] = { + /* MM0 */ + GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 0), + GATE_MM0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1), + GATE_MM0(CLK_MM_SMI_LARB1, "mm_smi_larb1", "mm_sel", 2), + GATE_MM0(CLK_MM_GALS_COMM0, "mm_gals_comm0", "mm_sel", 3), + GATE_MM0(CLK_MM_GALS_COMM1, "mm_gals_comm1", "mm_sel", 4), + GATE_MM0(CLK_MM_GALS_CCU2MM, "mm_gals_ccu2mm", "mm_sel", 5), + GATE_MM0(CLK_MM_GALS_IPU12MM, "mm_gals_ipu12mm", "mm_sel", 6), + GATE_MM0(CLK_MM_GALS_IMG2MM, "mm_gals_img2mm", "mm_sel", 7), + GATE_MM0(CLK_MM_GALS_CAM2MM, "mm_gals_cam2mm", "mm_sel", 8), + GATE_MM0(CLK_MM_GALS_IPU2MM, "mm_gals_ipu2mm", "mm_sel", 9), + GATE_MM0(CLK_MM_MDP_DL_TXCK, "mm_mdp_dl_txck", "mm_sel", 10), + GATE_MM0(CLK_MM_IPU_DL_TXCK, "mm_ipu_dl_txck", "mm_sel", 11), + GATE_MM0(CLK_MM_MDP_RDMA0, "mm_mdp_rdma0", "mm_sel", 12), + GATE_MM0(CLK_MM_MDP_RDMA1, "mm_mdp_rdma1", "mm_sel", 13), + GATE_MM0(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "mm_sel", 14), + GATE_MM0(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "mm_sel", 15), + GATE_MM0(CLK_MM_MDP_TDSHP, "mm_mdp_tdshp", "mm_sel", 16), + GATE_MM0(CLK_MM_MDP_WROT0, "mm_mdp_wrot0", "mm_sel", 17), + GATE_MM0(CLK_MM_MDP_WROT1, "mm_mdp_wrot1", "mm_sel", 18), + GATE_MM0(CLK_MM_FAKE_ENG, "mm_fake_eng", "mm_sel", 19), + GATE_MM0(CLK_MM_DISP_OVL0, "mm_disp_ovl0", "mm_sel", 20), + GATE_MM0(CLK_MM_DISP_OVL0_2L, "mm_disp_ovl0_2l", "mm_sel", 21), + GATE_MM0(CLK_MM_DISP_OVL1_2L, "mm_disp_ovl1_2l", 
"mm_sel", 22), + GATE_MM0(CLK_MM_DISP_RDMA0, "mm_disp_rdma0", "mm_sel", 23), + GATE_MM0(CLK_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 24), + GATE_MM0(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "mm_sel", 25), + GATE_MM0(CLK_MM_DISP_COLOR0, "mm_disp_color0", "mm_sel", 26), + GATE_MM0(CLK_MM_DISP_CCORR0, "mm_disp_ccorr0", "mm_sel", 27), + GATE_MM0(CLK_MM_DISP_AAL0, "mm_disp_aal0", "mm_sel", 28), + GATE_MM0(CLK_MM_DISP_GAMMA0, "mm_disp_gamma0", "mm_sel", 29), + GATE_MM0(CLK_MM_DISP_DITHER0, "mm_disp_dither0", "mm_sel", 30), + GATE_MM0(CLK_MM_DISP_SPLIT, "mm_disp_split", "mm_sel", 31), + /* MM1 */ + GATE_MM1(CLK_MM_DSI0_MM_CK, "mm_dsi0_mmck", "mm_sel", 0), + GATE_MM1(CLK_MM_DSI0_IF_CK, "mm_dsi0_ifck", "mm_sel", 1), + GATE_MM1(CLK_MM_DPI_MM_CK, "mm_dpi_mmck", "mm_sel", 2), + GATE_MM1(CLK_MM_DPI_IF_CK, "mm_dpi_ifck", "dpi0_sel", 3), + GATE_MM1(CLK_MM_FAKE_ENG2, "mm_fake_eng2", "mm_sel", 4), + GATE_MM1(CLK_MM_MDP_DL_RX_CK, "mm_mdp_dl_rxck", "mm_sel", 5), + GATE_MM1(CLK_MM_IPU_DL_RX_CK, "mm_ipu_dl_rxck", "mm_sel", 6), + GATE_MM1(CLK_MM_26M, "mm_26m", "f_f26m_ck", 7), + GATE_MM1(CLK_MM_MM_R2Y, "mm_mmsys_r2y", "mm_sel", 8), + GATE_MM1(CLK_MM_DISP_RSZ, "mm_disp_rsz", "mm_sel", 9), + GATE_MM1(CLK_MM_MDP_AAL, "mm_mdp_aal", "mm_sel", 10), + GATE_MM1(CLK_MM_MDP_HDR, "mm_mdp_hdr", "mm_sel", 11), + GATE_MM1(CLK_MM_DBI_MM_CK, "mm_dbi_mmck", "mm_sel", 12), + GATE_MM1(CLK_MM_DBI_IF_CK, "mm_dbi_ifck", "dpi0_sel", 13), + GATE_MM1(CLK_MM_DISP_POSTMASK0, "mm_disp_pm0", "mm_sel", 14), + GATE_MM1(CLK_MM_DISP_HRT_BW, "mm_disp_hrt_bw", "mm_sel", 15), + GATE_MM1(CLK_MM_DISP_OVL_FBDC, "mm_disp_ovl_fbdc", "mm_sel", 16), +}; + +static const struct of_device_id of_match_clk_mt6779_mm[] = { + { .compatible = "mediatek,mt6779-mmsys", }, + {} +}; + +static int clk_mt6779_mm_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK); + + mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks), + clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static struct platform_driver clk_mt6779_mm_drv = { + .probe = clk_mt6779_mm_probe, + .driver = { + .name = "clk-mt6779-mm", + .of_match_table = of_match_clk_mt6779_mm, + }, +}; + +builtin_platform_driver(clk_mt6779_mm_drv); diff --git a/drivers/clk/mediatek/clk-mt6779-vdec.c b/drivers/clk/mediatek/clk-mt6779-vdec.c new file mode 100644 index 00000000000000..1900da2586a19b --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6779-vdec.c @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019 MediaTek Inc. 
+ * Author: Wendell Lin + */ + +#include +#include + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include + +static const struct mtk_gate_regs vdec0_cg_regs = { + .set_ofs = 0x0000, + .clr_ofs = 0x0004, + .sta_ofs = 0x0000, +}; + +static const struct mtk_gate_regs vdec1_cg_regs = { + .set_ofs = 0x0008, + .clr_ofs = 0x000c, + .sta_ofs = 0x0008, +}; + +#define GATE_VDEC0_I(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr_inv) +#define GATE_VDEC1_I(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr_inv) + +static const struct mtk_gate vdec_clks[] = { + /* VDEC0 */ + GATE_VDEC0_I(CLK_VDEC_VDEC, "vdec_cken", "vdec_sel", 0), + /* VDEC1 */ + GATE_VDEC1_I(CLK_VDEC_LARB1, "vdec_larb1_cken", "vdec_sel", 0), +}; + +static const struct of_device_id of_match_clk_mt6779_vdec[] = { + { .compatible = "mediatek,mt6779-vdecsys", }, + {} +}; + +static int clk_mt6779_vdec_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_VDEC_GCON_NR_CLK); + + mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks), + clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static struct platform_driver clk_mt6779_vdec_drv = { + .probe = clk_mt6779_vdec_probe, + .driver = { + .name = "clk-mt6779-vdec", + .of_match_table = of_match_clk_mt6779_vdec, + }, +}; + +builtin_platform_driver(clk_mt6779_vdec_drv); diff --git a/drivers/clk/mediatek/clk-mt6779-venc.c b/drivers/clk/mediatek/clk-mt6779-venc.c new file mode 100644 index 00000000000000..b41d1f859edc9d --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6779-venc.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019 MediaTek Inc. 
+ * Author: Wendell Lin + */ + +#include +#include + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include + +static const struct mtk_gate_regs venc_cg_regs = { + .set_ofs = 0x0004, + .clr_ofs = 0x0008, + .sta_ofs = 0x0000, +}; + +#define GATE_VENC_I(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr_inv) + +static const struct mtk_gate venc_clks[] = { + GATE_VENC_I(CLK_VENC_GCON_LARB, "venc_larb", "venc_sel", 0), + GATE_VENC_I(CLK_VENC_GCON_VENC, "venc_venc", "venc_sel", 4), + GATE_VENC_I(CLK_VENC_GCON_JPGENC, "venc_jpgenc", "venc_sel", 8), + GATE_VENC_I(CLK_VENC_GCON_GALS, "venc_gals", "venc_sel", 28), +}; + +static const struct of_device_id of_match_clk_mt6779_venc[] = { + { .compatible = "mediatek,mt6779-vencsys", }, + {} +}; + +static int clk_mt6779_venc_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_VENC_GCON_NR_CLK); + + mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks), + clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static struct platform_driver clk_mt6779_venc_drv = { + .probe = clk_mt6779_venc_probe, + .driver = { + .name = "clk-mt6779-venc", + .of_match_table = of_match_clk_mt6779_venc, + }, +}; + +builtin_platform_driver(clk_mt6779_venc_drv); diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c new file mode 100644 index 00000000000000..608a9a6621a37b --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6779.c @@ -0,0 +1,1315 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019 MediaTek Inc. + * Author: Wendell Lin + */ + +#include +#include +#include +#include + +#include "clk-mtk.h" +#include "clk-mux.h" +#include "clk-gate.h" + +#include + +static DEFINE_SPINLOCK(mt6779_clk_lock); + +static const struct mtk_fixed_clk top_fixed_clks[] = { + FIXED_CLK(CLK_TOP_CLK26M, "f_f26m_ck", "clk26m", 26000000), +}; + +static const struct mtk_fixed_factor top_divs[] = { + FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1, 2), + FACTOR(CLK_TOP_F26M_CK_D2, "csw_f26m_ck_d2", "clk26m", 1, 2), + FACTOR(CLK_TOP_MAINPLL_CK, "mainpll_ck", "mainpll", 1, 1), + FACTOR(CLK_TOP_MAINPLL_D2, "mainpll_d2", "mainpll_ck", 1, 2), + FACTOR(CLK_TOP_MAINPLL_D2_D2, "mainpll_d2_d2", "mainpll_d2", 1, 2), + FACTOR(CLK_TOP_MAINPLL_D2_D4, "mainpll_d2_d4", "mainpll_d2", 1, 4), + FACTOR(CLK_TOP_MAINPLL_D2_D8, "mainpll_d2_d8", "mainpll_d2", 1, 8), + FACTOR(CLK_TOP_MAINPLL_D2_D16, "mainpll_d2_d16", "mainpll_d2", 1, 16), + FACTOR(CLK_TOP_MAINPLL_D3, "mainpll_d3", "mainpll", 1, 3), + FACTOR(CLK_TOP_MAINPLL_D3_D2, "mainpll_d3_d2", "mainpll_d3", 1, 2), + FACTOR(CLK_TOP_MAINPLL_D3_D4, "mainpll_d3_d4", "mainpll_d3", 1, 4), + FACTOR(CLK_TOP_MAINPLL_D3_D8, "mainpll_d3_d8", "mainpll_d3", 1, 8), + FACTOR(CLK_TOP_MAINPLL_D5, "mainpll_d5", "mainpll", 1, 5), + FACTOR(CLK_TOP_MAINPLL_D5_D2, "mainpll_d5_d2", "mainpll_d5", 1, 2), + FACTOR(CLK_TOP_MAINPLL_D5_D4, "mainpll_d5_d4", "mainpll_d5", 1, 4), + FACTOR(CLK_TOP_MAINPLL_D7, "mainpll_d7", "mainpll", 1, 7), + FACTOR(CLK_TOP_MAINPLL_D7_D2, "mainpll_d7_d2", "mainpll_d7", 1, 2), + FACTOR(CLK_TOP_MAINPLL_D7_D4, "mainpll_d7_d4", "mainpll_d7", 1, 4), + FACTOR(CLK_TOP_UNIVPLL_CK, "univpll", "univ2pll", 1, 2), + FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2), + FACTOR(CLK_TOP_UNIVPLL_D2_D2, "univpll_d2_d2", "univpll_d2", 1, 2), + FACTOR(CLK_TOP_UNIVPLL_D2_D4, "univpll_d2_d4", "univpll_d2", 1, 4), + 
FACTOR(CLK_TOP_UNIVPLL_D2_D8, "univpll_d2_d8", "univpll_d2", 1, 8), + FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3), + FACTOR(CLK_TOP_UNIVPLL_D3_D2, "univpll_d3_d2", "univpll_d3", 1, 2), + FACTOR(CLK_TOP_UNIVPLL_D3_D4, "univpll_d3_d4", "univpll_d3", 1, 4), + FACTOR(CLK_TOP_UNIVPLL_D3_D8, "univpll_d3_d8", "univpll_d3", 1, 8), + FACTOR(CLK_TOP_UNIVPLL_D3_D16, "univpll_d3_d16", "univpll_d3", 1, 16), + FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5), + FACTOR(CLK_TOP_UNIVPLL_D5_D2, "univpll_d5_d2", "univpll_d5", 1, 2), + FACTOR(CLK_TOP_UNIVPLL_D5_D4, "univpll_d5_d4", "univpll_d5", 1, 4), + FACTOR(CLK_TOP_UNIVPLL_D5_D8, "univpll_d5_d8", "univpll_d5", 1, 8), + FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll", 1, 7), + FACTOR(CLK_TOP_UNIVP_192M_CK, "univpll_192m_ck", "univ2pll", 1, 13), + FACTOR(CLK_TOP_UNIVP_192M_D2, "univpll_192m_d2", "univpll_192m_ck", + 1, 2), + FACTOR(CLK_TOP_UNIVP_192M_D4, "univpll_192m_d4", "univpll_192m_ck", + 1, 4), + FACTOR(CLK_TOP_UNIVP_192M_D8, "univpll_192m_d8", "univpll_192m_ck", + 1, 8), + FACTOR(CLK_TOP_UNIVP_192M_D16, "univpll_192m_d16", "univpll_192m_ck", + 1, 16), + FACTOR(CLK_TOP_UNIVP_192M_D32, "univpll_192m_d32", "univpll_192m_ck", + 1, 32), + FACTOR(CLK_TOP_APLL1_CK, "apll1_ck", "apll1", 1, 1), + FACTOR(CLK_TOP_APLL1_D2, "apll1_d2", "apll1", 1, 2), + FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "apll1", 1, 4), + FACTOR(CLK_TOP_APLL1_D8, "apll1_d8", "apll1", 1, 8), + FACTOR(CLK_TOP_APLL2_CK, "apll2_ck", "apll2", 1, 1), + FACTOR(CLK_TOP_APLL2_D2, "apll2_d2", "apll2", 1, 2), + FACTOR(CLK_TOP_APLL2_D4, "apll2_d4", "apll2", 1, 4), + FACTOR(CLK_TOP_APLL2_D8, "apll2_d8", "apll2", 1, 8), + FACTOR(CLK_TOP_TVDPLL_CK, "tvdpll_ck", "tvdpll", 1, 1), + FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll_ck", 1, 2), + FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll", 1, 4), + FACTOR(CLK_TOP_TVDPLL_D8, "tvdpll_d8", "tvdpll", 1, 8), + FACTOR(CLK_TOP_TVDPLL_D16, "tvdpll_d16", "tvdpll", 1, 16), + FACTOR(CLK_TOP_MMPLL_CK, "mmpll_ck", "mmpll", 1, 1), + FACTOR(CLK_TOP_MMPLL_D4, "mmpll_d4", "mmpll", 1, 4), + FACTOR(CLK_TOP_MMPLL_D4_D2, "mmpll_d4_d2", "mmpll_d4", 1, 2), + FACTOR(CLK_TOP_MMPLL_D4_D4, "mmpll_d4_d4", "mmpll_d4", 1, 4), + FACTOR(CLK_TOP_MMPLL_D5, "mmpll_d5", "mmpll", 1, 5), + FACTOR(CLK_TOP_MMPLL_D5_D2, "mmpll_d5_d2", "mmpll_d5", 1, 2), + FACTOR(CLK_TOP_MMPLL_D5_D4, "mmpll_d5_d4", "mmpll_d5", 1, 4), + FACTOR(CLK_TOP_MMPLL_D6, "mmpll_d6", "mmpll", 1, 6), + FACTOR(CLK_TOP_MMPLL_D7, "mmpll_d7", "mmpll", 1, 7), + FACTOR(CLK_TOP_MFGPLL_CK, "mfgpll_ck", "mfgpll", 1, 1), + FACTOR(CLK_TOP_ADSPPLL_CK, "adsppll_ck", "adsppll", 1, 1), + FACTOR(CLK_TOP_ADSPPLL_D4, "adsppll_d4", "adsppll", 1, 4), + FACTOR(CLK_TOP_ADSPPLL_D5, "adsppll_d5", "adsppll", 1, 5), + FACTOR(CLK_TOP_ADSPPLL_D6, "adsppll_d6", "adsppll", 1, 6), + FACTOR(CLK_TOP_MSDCPLL_CK, "msdcpll_ck", "msdcpll", 1, 1), + FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, 2), + FACTOR(CLK_TOP_MSDCPLL_D4, "msdcpll_d4", "msdcpll", 1, 4), + FACTOR(CLK_TOP_MSDCPLL_D8, "msdcpll_d8", "msdcpll", 1, 8), + FACTOR(CLK_TOP_MSDCPLL_D16, "msdcpll_d16", "msdcpll", 1, 16), + FACTOR(CLK_TOP_AD_OSC_CK, "ad_osc_ck", "osc", 1, 1), + FACTOR(CLK_TOP_OSC_D2, "osc_d2", "osc", 1, 2), + FACTOR(CLK_TOP_OSC_D4, "osc_d4", "osc", 1, 4), + FACTOR(CLK_TOP_OSC_D8, "osc_d8", "osc", 1, 8), + FACTOR(CLK_TOP_OSC_D10, "osc_d10", "osc", 1, 10), + FACTOR(CLK_TOP_OSC_D16, "osc_d16", "osc", 1, 16), + FACTOR(CLK_TOP_AD_OSC2_CK, "ad_osc2_ck", "osc2", 1, 1), + FACTOR(CLK_TOP_OSC2_D2, "osc2_d2", "osc2", 1, 2), + FACTOR(CLK_TOP_OSC2_D3, "osc2_d3", "osc2", 
1, 3), + FACTOR(CLK_TOP_TVDPLL_MAINPLL_D2_CK, "tvdpll_mainpll_d2_ck", + "tvdpll", 1, 1), + FACTOR(CLK_TOP_FMEM_466M_CK, "fmem_466m_ck", "fmem", 1, 1), +}; + +static const char * const axi_parents[] = { + "clk26m", + "mainpll_d2_d4", + "mainpll_d7", + "osc_d4" +}; + +static const char * const mm_parents[] = { + "clk26m", + "tvdpll_mainpll_d2_ck", + "mmpll_d7", + "mmpll_d5_d2", + "mainpll_d2_d2", + "mainpll_d3_d2" +}; + +static const char * const scp_parents[] = { + "clk26m", + "univpll_d2_d8", + "mainpll_d2_d4", + "mainpll_d3", + "univpll_d3", + "ad_osc2_ck", + "osc2_d2", + "osc2_d3" +}; + +static const char * const img_parents[] = { + "clk26m", + "mainpll_d2", + "mainpll_d2", + "univpll_d3", + "mainpll_d3", + "mmpll_d5_d2", + "tvdpll_mainpll_d2_ck", + "mainpll_d5" +}; + +static const char * const ipe_parents[] = { + "clk26m", + "mainpll_d2", + "mmpll_d7", + "univpll_d3", + "mainpll_d3", + "mmpll_d5_d2", + "mainpll_d2_d2", + "mainpll_d5" +}; + +static const char * const dpe_parents[] = { + "clk26m", + "mainpll_d2", + "mmpll_d7", + "univpll_d3", + "mainpll_d3", + "mmpll_d5_d2", + "mainpll_d2_d2", + "mainpll_d5" +}; + +static const char * const cam_parents[] = { + "clk26m", + "mainpll_d2", + "mmpll_d6", + "mainpll_d3", + "mmpll_d7", + "univpll_d3", + "mmpll_d5_d2", + "adsppll_d5", + "tvdpll_mainpll_d2_ck", + "univpll_d3_d2" +}; + +static const char * const ccu_parents[] = { + "clk26m", + "mainpll_d2", + "mmpll_d6", + "mainpll_d3", + "mmpll_d7", + "univpll_d3", + "mmpll_d5_d2", + "mainpll_d2_d2", + "adsppll_d5", + "univpll_d3_d2" +}; + +static const char * const dsp_parents[] = { + "clk26m", + "univpll_d3_d8", + "univpll_d3_d4", + "mainpll_d2_d4", + "univpll_d3_d2", + "mainpll_d2_d2", + "univpll_d2_d2", + "mainpll_d3", + "univpll_d3", + "mmpll_d7", + "mmpll_d6", + "adsppll_d5", + "tvdpll_ck", + "tvdpll_mainpll_d2_ck", + "univpll_d2", + "adsppll_d4" +}; + +static const char * const dsp1_parents[] = { + "clk26m", + "univpll_d3_d8", + "univpll_d3_d4", + "mainpll_d2_d4", + "univpll_d3_d2", + "mainpll_d2_d2", + "univpll_d2_d2", + "mainpll_d3", + "univpll_d3", + "mmpll_d7", + "mmpll_d6", + "adsppll_d5", + "tvdpll_ck", + "tvdpll_mainpll_d2_ck", + "univpll_d2", + "adsppll_d4" +}; + +static const char * const dsp2_parents[] = { + "clk26m", + "univpll_d3_d8", + "univpll_d3_d4", + "mainpll_d2_d4", + "univpll_d3_d2", + "mainpll_d2_d2", + "univpll_d2_d2", + "mainpll_d3", + "univpll_d3", + "mmpll_d7", + "mmpll_d6", + "adsppll_d5", + "tvdpll_ck", + "tvdpll_mainpll_d2_ck", + "univpll_d2", + "adsppll_d4" +}; + +static const char * const dsp3_parents[] = { + "clk26m", + "univpll_d3_d8", + "mainpll_d2_d4", + "univpll_d3_d2", + "mainpll_d2_d2", + "univpll_d2_d2", + "mainpll_d3", + "univpll_d3", + "mmpll_d7", + "mmpll_d6", + "mainpll_d2", + "tvdpll_ck", + "tvdpll_mainpll_d2_ck", + "univpll_d2", + "adsppll_d4", + "mmpll_d4" +}; + +static const char * const ipu_if_parents[] = { + "clk26m", + "univpll_d3_d8", + "univpll_d3_d4", + "mainpll_d2_d4", + "univpll_d3_d2", + "mainpll_d2_d2", + "univpll_d2_d2", + "mainpll_d3", + "univpll_d3", + "mmpll_d7", + "mmpll_d6", + "adsppll_d5", + "tvdpll_ck", + "tvdpll_mainpll_d2_ck", + "univpll_d2", + "adsppll_d4" +}; + +static const char * const mfg_parents[] = { + "clk26m", + "mfgpll_ck", + "univpll_d3", + "mainpll_d5" +}; + +static const char * const f52m_mfg_parents[] = { + "clk26m", + "univpll_d3_d2", + "univpll_d3_d4", + "univpll_d3_d8" +}; + +static const char * const camtg_parents[] = { + "clk26m", + "univpll_192m_d8", + "univpll_d3_d8", + "univpll_192m_d4", + 
"univpll_d3_d16", + "csw_f26m_ck_d2", + "univpll_192m_d16", + "univpll_192m_d32" +}; + +static const char * const camtg2_parents[] = { + "clk26m", + "univpll_192m_d8", + "univpll_d3_d8", + "univpll_192m_d4", + "univpll_d3_d16", + "csw_f26m_ck_d2", + "univpll_192m_d16", + "univpll_192m_d32" +}; + +static const char * const camtg3_parents[] = { + "clk26m", + "univpll_192m_d8", + "univpll_d3_d8", + "univpll_192m_d4", + "univpll_d3_d16", + "csw_f26m_ck_d2", + "univpll_192m_d16", + "univpll_192m_d32" +}; + +static const char * const camtg4_parents[] = { + "clk26m", + "univpll_192m_d8", + "univpll_d3_d8", + "univpll_192m_d4", + "univpll_d3_d16", + "csw_f26m_ck_d2", + "univpll_192m_d16", + "univpll_192m_d32" +}; + +static const char * const uart_parents[] = { + "clk26m", + "univpll_d3_d8" +}; + +static const char * const spi_parents[] = { + "clk26m", + "mainpll_d5_d2", + "mainpll_d3_d4", + "msdcpll_d4" +}; + +static const char * const msdc50_hclk_parents[] = { + "clk26m", + "mainpll_d2_d2", + "mainpll_d3_d2" +}; + +static const char * const msdc50_0_parents[] = { + "clk26m", + "msdcpll_ck", + "msdcpll_d2", + "univpll_d2_d4", + "mainpll_d3_d2", + "univpll_d2_d2" +}; + +static const char * const msdc30_1_parents[] = { + "clk26m", + "univpll_d3_d2", + "mainpll_d3_d2", + "mainpll_d7", + "msdcpll_d2" +}; + +static const char * const audio_parents[] = { + "clk26m", + "mainpll_d5_d4", + "mainpll_d7_d4", + "mainpll_d2_d16" +}; + +static const char * const aud_intbus_parents[] = { + "clk26m", + "mainpll_d2_d4", + "mainpll_d7_d2" +}; + +static const char * const fpwrap_ulposc_parents[] = { + "osc_d10", + "clk26m", + "osc_d4", + "osc_d8", + "osc_d16" +}; + +static const char * const atb_parents[] = { + "clk26m", + "mainpll_d2_d2", + "mainpll_d5" +}; + +static const char * const sspm_parents[] = { + "clk26m", + "univpll_d2_d4", + "mainpll_d2_d2", + "univpll_d2_d2", + "mainpll_d3" +}; + +static const char * const dpi0_parents[] = { + "clk26m", + "tvdpll_d2", + "tvdpll_d4", + "tvdpll_d8", + "tvdpll_d16" +}; + +static const char * const scam_parents[] = { + "clk26m", + "mainpll_d5_d2" +}; + +static const char * const disppwm_parents[] = { + "clk26m", + "univpll_d3_d4", + "osc_d2", + "osc_d4", + "osc_d16" +}; + +static const char * const usb_top_parents[] = { + "clk26m", + "univpll_d5_d4", + "univpll_d3_d4", + "univpll_d5_d2" +}; + +static const char * const ssusb_top_xhci_parents[] = { + "clk26m", + "univpll_d5_d4", + "univpll_d3_d4", + "univpll_d5_d2" +}; + +static const char * const spm_parents[] = { + "clk26m", + "osc_d8", + "mainpll_d2_d8" +}; + +static const char * const i2c_parents[] = { + "clk26m", + "mainpll_d2_d8", + "univpll_d5_d2" +}; + +static const char * const seninf_parents[] = { + "clk26m", + "univpll_d7", + "univpll_d3_d2", + "univpll_d2_d2", + "mainpll_d3", + "mmpll_d4_d2", + "mmpll_d7", + "mmpll_d6" +}; + +static const char * const seninf1_parents[] = { + "clk26m", + "univpll_d7", + "univpll_d3_d2", + "univpll_d2_d2", + "mainpll_d3", + "mmpll_d4_d2", + "mmpll_d7", + "mmpll_d6" +}; + +static const char * const seninf2_parents[] = { + "clk26m", + "univpll_d7", + "univpll_d3_d2", + "univpll_d2_d2", + "mainpll_d3", + "mmpll_d4_d2", + "mmpll_d7", + "mmpll_d6" +}; + +static const char * const dxcc_parents[] = { + "clk26m", + "mainpll_d2_d2", + "mainpll_d2_d4", + "mainpll_d2_d8" +}; + +static const char * const aud_engen1_parents[] = { + "clk26m", + "apll1_d2", + "apll1_d4", + "apll1_d8" +}; + +static const char * const aud_engen2_parents[] = { + "clk26m", + "apll2_d2", + "apll2_d4", + "apll2_d8" 
+}; + +static const char * const faes_ufsfde_parents[] = { + "clk26m", + "mainpll_d2", + "mainpll_d2_d2", + "mainpll_d3", + "mainpll_d2_d4", + "univpll_d3" +}; + +static const char * const fufs_parents[] = { + "clk26m", + "mainpll_d2_d4", + "mainpll_d2_d8", + "mainpll_d2_d16" +}; + +static const char * const aud_1_parents[] = { + "clk26m", + "apll1_ck" +}; + +static const char * const aud_2_parents[] = { + "clk26m", + "apll2_ck" +}; + +static const char * const adsp_parents[] = { + "clk26m", + "mainpll_d3", + "univpll_d2_d4", + "univpll_d2", + "mmpll_d4", + "adsppll_d4", + "adsppll_d6" +}; + +static const char * const dpmaif_parents[] = { + "clk26m", + "univpll_d2_d4", + "mainpll_d3", + "mainpll_d2_d2", + "univpll_d2_d2", + "univpll_d3" +}; + +static const char * const venc_parents[] = { + "clk26m", + "mmpll_d7", + "mainpll_d3", + "univpll_d2_d2", + "mainpll_d2_d2", + "univpll_d3", + "mmpll_d6", + "mainpll_d5", + "mainpll_d3_d2", + "mmpll_d4_d2", + "univpll_d2_d4", + "mmpll_d5", + "univpll_192m_d2" + +}; + +static const char * const vdec_parents[] = { + "clk26m", + "univpll_d2_d4", + "mainpll_d3", + "univpll_d2_d2", + "mainpll_d2_d2", + "univpll_d3", + "univpll_d5", + "univpll_d5_d2", + "mainpll_d2", + "univpll_d2", + "univpll_192m_d2" +}; + +static const char * const camtm_parents[] = { + "clk26m", + "univpll_d7", + "univpll_d3_d2", + "univpll_d2_d2" +}; + +static const char * const pwm_parents[] = { + "clk26m", + "univpll_d2_d8" +}; + +static const char * const audio_h_parents[] = { + "clk26m", + "univpll_d7", + "apll1_ck", + "apll2_ck" +}; + +static const char * const camtg5_parents[] = { + "clk26m", + "univpll_192m_d8", + "univpll_d3_d8", + "univpll_192m_d4", + "univpll_d3_d16", + "csw_f26m_ck_d2", + "univpll_192m_d16", + "univpll_192m_d32" +}; + +/* + * CRITICAL CLOCK: + * axi_sel is the main bus clock of whole SOC. + * spm_sel is the clock of the always-on co-processor. + * sspm_sel is the clock of the always-on co-processor. 
+ */ +static const struct mtk_mux top_muxes[] = { + /* CLK_CFG_0 */ + MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_AXI, "axi_sel", axi_parents, + 0x20, 0x24, 0x28, 0, 2, 7, + 0x004, 0, CLK_IS_CRITICAL), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MM, "mm_sel", mm_parents, + 0x20, 0x24, 0x28, 8, 3, 15, 0x004, 1), + MUX_GATE_CLR_SET_UPD(CLK_TOP_SCP, "scp_sel", scp_parents, + 0x20, 0x24, 0x28, 16, 3, 23, 0x004, 2), + /* CLK_CFG_1 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_IMG, "img_sel", img_parents, + 0x30, 0x34, 0x38, 0, 3, 7, 0x004, 4), + MUX_GATE_CLR_SET_UPD(CLK_TOP_IPE, "ipe_sel", ipe_parents, + 0x30, 0x34, 0x38, 8, 3, 15, 0x004, 5), + MUX_GATE_CLR_SET_UPD(CLK_TOP_DPE, "dpe_sel", dpe_parents, + 0x30, 0x34, 0x38, 16, 3, 23, 0x004, 6), + MUX_GATE_CLR_SET_UPD(CLK_TOP_CAM, "cam_sel", cam_parents, + 0x30, 0x34, 0x38, 24, 4, 31, 0x004, 7), + /* CLK_CFG_2 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_CCU, "ccu_sel", ccu_parents, + 0x40, 0x44, 0x48, 0, 4, 7, 0x004, 8), + MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP, "dsp_sel", dsp_parents, + 0x40, 0x44, 0x48, 8, 4, 15, 0x004, 9), + MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP1, "dsp1_sel", dsp1_parents, + 0x40, 0x44, 0x48, 16, 4, 23, 0x004, 10), + MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP2, "dsp2_sel", dsp2_parents, + 0x40, 0x44, 0x48, 24, 4, 31, 0x004, 11), + /* CLK_CFG_3 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP3, "dsp3_sel", dsp3_parents, + 0x50, 0x54, 0x58, 0, 4, 7, 0x004, 12), + MUX_GATE_CLR_SET_UPD(CLK_TOP_IPU_IF, "ipu_if_sel", ipu_if_parents, + 0x50, 0x54, 0x58, 8, 4, 15, 0x004, 13), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MFG, "mfg_sel", mfg_parents, + 0x50, 0x54, 0x58, 16, 2, 23, 0x004, 14), + MUX_GATE_CLR_SET_UPD(CLK_TOP_F52M_MFG, "f52m_mfg_sel", + f52m_mfg_parents, 0x50, 0x54, 0x58, + 24, 2, 31, 0x004, 15), + /* CLK_CFG_4 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG, "camtg_sel", camtg_parents, + 0x60, 0x64, 0x68, 0, 3, 7, 0x004, 16), + MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG2, "camtg2_sel", camtg2_parents, + 0x60, 0x64, 0x68, 8, 3, 15, 0x004, 17), + MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG3, "camtg3_sel", camtg3_parents, + 0x60, 0x64, 0x68, 16, 3, 23, 0x004, 18), + MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG4, "camtg4_sel", camtg4_parents, + 0x60, 0x64, 0x68, 24, 3, 31, 0x004, 19), + /* CLK_CFG_5 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_UART, "uart_sel", uart_parents, + 0x70, 0x74, 0x78, 0, 1, 7, 0x004, 20), + MUX_GATE_CLR_SET_UPD(CLK_TOP_SPI, "spi_sel", spi_parents, + 0x70, 0x74, 0x78, 8, 2, 15, 0x004, 21), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0_HCLK, "msdc50_hclk_sel", + msdc50_hclk_parents, 0x70, 0x74, 0x78, + 16, 2, 23, 0x004, 22), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0, "msdc50_0_sel", + msdc50_0_parents, 0x70, 0x74, 0x78, + 24, 3, 31, 0x004, 23), + /* CLK_CFG_6 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_1, "msdc30_1_sel", + msdc30_1_parents, 0x80, 0x84, 0x88, + 0, 3, 7, 0x004, 24), + MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD, "audio_sel", audio_parents, + 0x80, 0x84, 0x88, 8, 2, 15, 0x004, 25), + MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_INTBUS, "aud_intbus_sel", + aud_intbus_parents, 0x80, 0x84, 0x88, + 16, 2, 23, 0x004, 26), + MUX_GATE_CLR_SET_UPD(CLK_TOP_FPWRAP_ULPOSC, "fpwrap_ulposc_sel", + fpwrap_ulposc_parents, 0x80, 0x84, 0x88, + 24, 3, 31, 0x004, 27), + /* CLK_CFG_7 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_ATB, "atb_sel", atb_parents, + 0x90, 0x94, 0x98, 0, 2, 7, 0x004, 28), + MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SSPM, "sspm_sel", sspm_parents, + 0x90, 0x94, 0x98, 8, 3, 15, + 0x004, 29, CLK_IS_CRITICAL), + MUX_GATE_CLR_SET_UPD(CLK_TOP_DPI0, "dpi0_sel", dpi0_parents, + 0x90, 0x94, 0x98, 16, 3, 23, 0x004, 30), + MUX_GATE_CLR_SET_UPD(CLK_TOP_SCAM, "scam_sel", 
scam_parents, + 0x90, 0x94, 0x98, 24, 1, 31, 0x004, 0), + /* CLK_CFG_8 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_DISP_PWM, "disppwm_sel", + disppwm_parents, 0xa0, 0xa4, 0xa8, + 0, 3, 7, 0x008, 1), + MUX_GATE_CLR_SET_UPD(CLK_TOP_USB_TOP, "usb_top_sel", + usb_top_parents, 0xa0, 0xa4, 0xa8, + 8, 2, 15, 0x008, 2), + MUX_GATE_CLR_SET_UPD(CLK_TOP_SSUSB_TOP_XHCI, "ssusb_top_xhci_sel", + ssusb_top_xhci_parents, 0xa0, 0xa4, 0xa8, + 16, 2, 23, 0x008, 3), + MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SPM, "spm_sel", spm_parents, + 0xa0, 0xa4, 0xa8, 24, 2, 31, + 0x008, 4, CLK_IS_CRITICAL), + /* CLK_CFG_9 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_I2C, "i2c_sel", i2c_parents, + 0xb0, 0xb4, 0xb8, 0, 2, 7, 0x008, 5), + MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF, "seninf_sel", seninf_parents, + 0xb0, 0xb4, 0xb8, 8, 2, 15, 0x008, 6), + MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF1, "seninf1_sel", + seninf1_parents, 0xb0, 0xb4, 0xb8, + 16, 2, 23, 0x008, 7), + MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF2, "seninf2_sel", + seninf2_parents, 0xb0, 0xb4, 0xb8, + 24, 2, 31, 0x008, 8), + /* CLK_CFG_10 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_DXCC, "dxcc_sel", dxcc_parents, + 0xc0, 0xc4, 0xc8, 0, 2, 7, 0x008, 9), + MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_ENG1, "aud_eng1_sel", + aud_engen1_parents, 0xc0, 0xc4, 0xc8, + 8, 2, 15, 0x008, 10), + MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_ENG2, "aud_eng2_sel", + aud_engen2_parents, 0xc0, 0xc4, 0xc8, + 16, 2, 23, 0x008, 11), + MUX_GATE_CLR_SET_UPD(CLK_TOP_FAES_UFSFDE, "faes_ufsfde_sel", + faes_ufsfde_parents, 0xc0, 0xc4, 0xc8, + 24, 3, 31, + 0x008, 12), + /* CLK_CFG_11 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_FUFS, "fufs_sel", fufs_parents, + 0xd0, 0xd4, 0xd8, 0, 2, 7, 0x008, 13), + MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_1, "aud_1_sel", aud_1_parents, + 0xd0, 0xd4, 0xd8, 8, 1, 15, 0x008, 14), + MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_2, "aud_2_sel", aud_2_parents, + 0xd0, 0xd4, 0xd8, 16, 1, 23, 0x008, 15), + MUX_GATE_CLR_SET_UPD(CLK_TOP_ADSP, "adsp_sel", adsp_parents, + 0xd0, 0xd4, 0xd8, 24, 3, 31, 0x008, 16), + /* CLK_CFG_12 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_DPMAIF, "dpmaif_sel", dpmaif_parents, + 0xe0, 0xe4, 0xe8, 0, 3, 7, 0x008, 17), + MUX_GATE_CLR_SET_UPD(CLK_TOP_VENC, "venc_sel", venc_parents, + 0xe0, 0xe4, 0xe8, 8, 4, 15, 0x008, 18), + MUX_GATE_CLR_SET_UPD(CLK_TOP_VDEC, "vdec_sel", vdec_parents, + 0xe0, 0xe4, 0xe8, 16, 4, 23, 0x008, 19), + MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTM, "camtm_sel", camtm_parents, + 0xe0, 0xe4, 0xe8, 24, 2, 31, 0x004, 20), + /* CLK_CFG_13 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_PWM, "pwm_sel", pwm_parents, + 0xf0, 0xf4, 0xf8, 0, 1, 7, 0x008, 21), + MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_H, "audio_h_sel", + audio_h_parents, 0xf0, 0xf4, 0xf8, + 8, 2, 15, 0x008, 22), + MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG5, "camtg5_sel", camtg5_parents, + 0xf0, 0xf4, 0xf8, 24, 3, 31, 0x008, 24), +}; + +static const char * const i2s0_m_ck_parents[] = { + "aud_1_sel", + "aud_2_sel" +}; + +static const char * const i2s1_m_ck_parents[] = { + "aud_1_sel", + "aud_2_sel" +}; + +static const char * const i2s2_m_ck_parents[] = { + "aud_1_sel", + "aud_2_sel" +}; + +static const char * const i2s3_m_ck_parents[] = { + "aud_1_sel", + "aud_2_sel" +}; + +static const char * const i2s4_m_ck_parents[] = { + "aud_1_sel", + "aud_2_sel" +}; + +static const char * const i2s5_m_ck_parents[] = { + "aud_1_sel", + "aud_2_sel" +}; + +static const struct mtk_composite top_aud_muxes[] = { + MUX(CLK_TOP_I2S0_M_SEL, "i2s0_m_ck_sel", i2s0_m_ck_parents, + 0x320, 8, 1), + MUX(CLK_TOP_I2S1_M_SEL, "i2s1_m_ck_sel", i2s1_m_ck_parents, + 0x320, 9, 1), + MUX(CLK_TOP_I2S2_M_SEL, "i2s2_m_ck_sel", 
i2s2_m_ck_parents, + 0x320, 10, 1), + MUX(CLK_TOP_I2S3_M_SEL, "i2s3_m_ck_sel", i2s3_m_ck_parents, + 0x320, 11, 1), + MUX(CLK_TOP_I2S4_M_SEL, "i2s4_m_ck_sel", i2s4_m_ck_parents, + 0x320, 12, 1), + MUX(CLK_TOP_I2S5_M_SEL, "i2s5_m_ck_sel", i2s5_m_ck_parents, + 0x328, 20, 1), +}; + +static struct mtk_composite top_aud_divs[] = { + DIV_GATE(CLK_TOP_APLL12_DIV0, "apll12_div0", "i2s0_m_ck_sel", + 0x320, 2, 0x324, 8, 0), + DIV_GATE(CLK_TOP_APLL12_DIV1, "apll12_div1", "i2s1_m_ck_sel", + 0x320, 3, 0x324, 8, 8), + DIV_GATE(CLK_TOP_APLL12_DIV2, "apll12_div2", "i2s2_m_ck_sel", + 0x320, 4, 0x324, 8, 16), + DIV_GATE(CLK_TOP_APLL12_DIV3, "apll12_div3", "i2s3_m_ck_sel", + 0x320, 5, 0x324, 8, 24), + DIV_GATE(CLK_TOP_APLL12_DIV4, "apll12_div4", "i2s4_m_ck_sel", + 0x320, 6, 0x328, 8, 0), + DIV_GATE(CLK_TOP_APLL12_DIVB, "apll12_divb", "apll12_div4", + 0x320, 7, 0x328, 8, 8), + DIV_GATE(CLK_TOP_APLL12_DIV5, "apll12_div5", "i2s5_m_ck_sel", + 0x328, 16, 0x328, 4, 28), +}; + +static const struct mtk_gate_regs infra0_cg_regs = { + .set_ofs = 0x80, + .clr_ofs = 0x84, + .sta_ofs = 0x90, +}; + +static const struct mtk_gate_regs infra1_cg_regs = { + .set_ofs = 0x88, + .clr_ofs = 0x8c, + .sta_ofs = 0x94, +}; + +static const struct mtk_gate_regs infra2_cg_regs = { + .set_ofs = 0xa4, + .clr_ofs = 0xa8, + .sta_ofs = 0xac, +}; + +static const struct mtk_gate_regs infra3_cg_regs = { + .set_ofs = 0xc0, + .clr_ofs = 0xc4, + .sta_ofs = 0xc8, +}; + +#define GATE_INFRA0(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &infra0_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr) +#define GATE_INFRA1(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &infra1_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr) +#define GATE_INFRA2(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &infra2_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr) +#define GATE_INFRA3(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &infra3_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr) + +static const struct mtk_gate infra_clks[] = { + /* INFRA0 */ + GATE_INFRA0(CLK_INFRA_PMIC_TMR, "infra_pmic_tmr", + "axi_sel", 0), + GATE_INFRA0(CLK_INFRA_PMIC_AP, "infra_pmic_ap", + "axi_sel", 1), + GATE_INFRA0(CLK_INFRA_PMIC_MD, "infra_pmic_md", + "axi_sel", 2), + GATE_INFRA0(CLK_INFRA_PMIC_CONN, "infra_pmic_conn", + "axi_sel", 3), + GATE_INFRA0(CLK_INFRA_SCPSYS, "infra_scp", + "axi_sel", 4), + GATE_INFRA0(CLK_INFRA_SEJ, "infra_sej", + "f_f26m_ck", 5), + GATE_INFRA0(CLK_INFRA_APXGPT, "infra_apxgpt", + "axi_sel", 6), + GATE_INFRA0(CLK_INFRA_ICUSB, "infra_icusb", + "axi_sel", 8), + GATE_INFRA0(CLK_INFRA_GCE, "infra_gce", + "axi_sel", 9), + GATE_INFRA0(CLK_INFRA_THERM, "infra_therm", + "axi_sel", 10), + GATE_INFRA0(CLK_INFRA_I2C0, "infra_i2c0", + "i2c_sel", 11), + GATE_INFRA0(CLK_INFRA_I2C1, "infra_i2c1", + "i2c_sel", 12), + GATE_INFRA0(CLK_INFRA_I2C2, "infra_i2c2", + "i2c_sel", 13), + GATE_INFRA0(CLK_INFRA_I2C3, "infra_i2c3", + "i2c_sel", 14), + GATE_INFRA0(CLK_INFRA_PWM_HCLK, "infra_pwm_hclk", + "pwm_sel", 15), + GATE_INFRA0(CLK_INFRA_PWM1, "infra_pwm1", + "pwm_sel", 16), + GATE_INFRA0(CLK_INFRA_PWM2, "infra_pwm2", + "pwm_sel", 17), + GATE_INFRA0(CLK_INFRA_PWM3, "infra_pwm3", + "pwm_sel", 18), + GATE_INFRA0(CLK_INFRA_PWM4, "infra_pwm4", + "pwm_sel", 19), + GATE_INFRA0(CLK_INFRA_PWM, "infra_pwm", + "pwm_sel", 21), + GATE_INFRA0(CLK_INFRA_UART1, "infra_uart1", + "uart_sel", 23), + GATE_INFRA0(CLK_INFRA_UART2, "infra_uart2", + "uart_sel", 24), + GATE_INFRA0(CLK_INFRA_UART3, "infra_uart3", + "uart_sel", 25), + GATE_INFRA0(CLK_INFRA_GCE_26M, 
"infra_gce_26m", + "axi_sel", 27), + GATE_INFRA0(CLK_INFRA_CQ_DMA_FPC, "infra_cqdma_fpc", + "axi_sel", 28), + GATE_INFRA0(CLK_INFRA_BTIF, "infra_btif", + "axi_sel", 31), + /* INFRA1 */ + GATE_INFRA1(CLK_INFRA_SPI0, "infra_spi0", + "spi_sel", 1), + GATE_INFRA1(CLK_INFRA_MSDC0, "infra_msdc0", + "msdc50_hclk_sel", 2), + GATE_INFRA1(CLK_INFRA_MSDC1, "infra_msdc1", + "axi_sel", 4), + GATE_INFRA1(CLK_INFRA_MSDC2, "infra_msdc2", + "axi_sel", 5), + GATE_INFRA1(CLK_INFRA_MSDC0_SCK, "infra_msdc0_sck", + "msdc50_0_sel", 6), + GATE_INFRA1(CLK_INFRA_DVFSRC, "infra_dvfsrc", + "f_f26m_ck", 7), + GATE_INFRA1(CLK_INFRA_GCPU, "infra_gcpu", + "axi_sel", 8), + GATE_INFRA1(CLK_INFRA_TRNG, "infra_trng", + "axi_sel", 9), + GATE_INFRA1(CLK_INFRA_AUXADC, "infra_auxadc", + "f_f26m_ck", 10), + GATE_INFRA1(CLK_INFRA_CPUM, "infra_cpum", + "axi_sel", 11), + GATE_INFRA1(CLK_INFRA_CCIF1_AP, "infra_ccif1_ap", + "axi_sel", 12), + GATE_INFRA1(CLK_INFRA_CCIF1_MD, "infra_ccif1_md", + "axi_sel", 13), + GATE_INFRA1(CLK_INFRA_AUXADC_MD, "infra_auxadc_md", + "f_f26m_ck", 14), + GATE_INFRA1(CLK_INFRA_MSDC1_SCK, "infra_msdc1_sck", + "msdc30_1_sel", 16), + GATE_INFRA1(CLK_INFRA_MSDC2_SCK, "infra_msdc2_sck", + "msdc30_2_sel", 17), + GATE_INFRA1(CLK_INFRA_AP_DMA, "infra_apdma", + "axi_sel", 18), + GATE_INFRA1(CLK_INFRA_XIU, "infra_xiu", + "axi_sel", 19), + GATE_INFRA1(CLK_INFRA_DEVICE_APC, "infra_device_apc", + "axi_sel", 20), + GATE_INFRA1(CLK_INFRA_CCIF_AP, "infra_ccif_ap", + "axi_sel", 23), + GATE_INFRA1(CLK_INFRA_DEBUGSYS, "infra_debugsys", + "axi_sel", 24), + GATE_INFRA1(CLK_INFRA_AUD, "infra_audio", + "axi_sel", 25), + GATE_INFRA1(CLK_INFRA_CCIF_MD, "infra_ccif_md", + "axi_sel", 26), + GATE_INFRA1(CLK_INFRA_DXCC_SEC_CORE, "infra_dxcc_sec_core", + "dxcc_sel", 27), + GATE_INFRA1(CLK_INFRA_DXCC_AO, "infra_dxcc_ao", + "dxcc_sel", 28), + GATE_INFRA1(CLK_INFRA_DEVMPU_BCLK, "infra_devmpu_bclk", + "axi_sel", 30), + GATE_INFRA1(CLK_INFRA_DRAMC_F26M, "infra_dramc_f26m", + "f_f26m_ck", 31), + /* INFRA2 */ + GATE_INFRA2(CLK_INFRA_IRTX, "infra_irtx", + "f_f26m_ck", 0), + GATE_INFRA2(CLK_INFRA_USB, "infra_usb", + "usb_top_sel", 1), + GATE_INFRA2(CLK_INFRA_DISP_PWM, "infra_disppwm", + "axi_sel", 2), + GATE_INFRA2(CLK_INFRA_AUD_26M_BCLK, + "infracfg_ao_audio_26m_bclk", "f_f26m_ck", 4), + GATE_INFRA2(CLK_INFRA_SPI1, "infra_spi1", + "spi_sel", 6), + GATE_INFRA2(CLK_INFRA_I2C4, "infra_i2c4", + "i2c_sel", 7), + GATE_INFRA2(CLK_INFRA_MODEM_TEMP_SHARE, "infra_md_tmp_share", + "f_f26m_ck", 8), + GATE_INFRA2(CLK_INFRA_SPI2, "infra_spi2", + "spi_sel", 9), + GATE_INFRA2(CLK_INFRA_SPI3, "infra_spi3", + "spi_sel", 10), + GATE_INFRA2(CLK_INFRA_UNIPRO_SCK, "infra_unipro_sck", + "fufs_sel", 11), + GATE_INFRA2(CLK_INFRA_UNIPRO_TICK, "infra_unipro_tick", + "fufs_sel", 12), + GATE_INFRA2(CLK_INFRA_UFS_MP_SAP_BCLK, "infra_ufs_mp_sap_bck", + "fufs_sel", 13), + GATE_INFRA2(CLK_INFRA_MD32_BCLK, "infra_md32_bclk", + "axi_sel", 14), + GATE_INFRA2(CLK_INFRA_UNIPRO_MBIST, "infra_unipro_mbist", + "axi_sel", 16), + GATE_INFRA2(CLK_INFRA_SSPM_BUS_HCLK, "infra_sspm_bus_hclk", + "axi_sel", 17), + GATE_INFRA2(CLK_INFRA_I2C5, "infra_i2c5", + "i2c_sel", 18), + GATE_INFRA2(CLK_INFRA_I2C5_ARBITER, "infra_i2c5_arbiter", + "i2c_sel", 19), + GATE_INFRA2(CLK_INFRA_I2C5_IMM, "infra_i2c5_imm", + "i2c_sel", 20), + GATE_INFRA2(CLK_INFRA_I2C1_ARBITER, "infra_i2c1_arbiter", + "i2c_sel", 21), + GATE_INFRA2(CLK_INFRA_I2C1_IMM, "infra_i2c1_imm", + "i2c_sel", 22), + GATE_INFRA2(CLK_INFRA_I2C2_ARBITER, "infra_i2c2_arbiter", + "i2c_sel", 23), + GATE_INFRA2(CLK_INFRA_I2C2_IMM, "infra_i2c2_imm", + 
"i2c_sel", 24), + GATE_INFRA2(CLK_INFRA_SPI4, "infra_spi4", + "spi_sel", 25), + GATE_INFRA2(CLK_INFRA_SPI5, "infra_spi5", + "spi_sel", 26), + GATE_INFRA2(CLK_INFRA_CQ_DMA, "infra_cqdma", + "axi_sel", 27), + GATE_INFRA2(CLK_INFRA_UFS, "infra_ufs", + "fufs_sel", 28), + GATE_INFRA2(CLK_INFRA_AES_UFSFDE, "infra_aes_ufsfde", + "faes_ufsfde_sel", 29), + GATE_INFRA2(CLK_INFRA_UFS_TICK, "infra_ufs_tick", + "fufs_sel", 30), + GATE_INFRA2(CLK_INFRA_SSUSB_XHCI, "infra_ssusb_xhci", + "ssusb_top_xhci_sel", 31), + /* INFRA3 */ + GATE_INFRA3(CLK_INFRA_MSDC0_SELF, "infra_msdc0_self", + "msdc50_0_sel", 0), + GATE_INFRA3(CLK_INFRA_MSDC1_SELF, "infra_msdc1_self", + "msdc50_0_sel", 1), + GATE_INFRA3(CLK_INFRA_MSDC2_SELF, "infra_msdc2_self", + "msdc50_0_sel", 2), + GATE_INFRA3(CLK_INFRA_SSPM_26M_SELF, "infra_sspm_26m_self", + "f_f26m_ck", 3), + GATE_INFRA3(CLK_INFRA_SSPM_32K_SELF, "infra_sspm_32k_self", + "f_f26m_ck", 4), + GATE_INFRA3(CLK_INFRA_UFS_AXI, "infra_ufs_axi", + "axi_sel", 5), + GATE_INFRA3(CLK_INFRA_I2C6, "infra_i2c6", + "i2c_sel", 6), + GATE_INFRA3(CLK_INFRA_AP_MSDC0, "infra_ap_msdc0", + "msdc50_hclk_sel", 7), + GATE_INFRA3(CLK_INFRA_MD_MSDC0, "infra_md_msdc0", + "msdc50_hclk_sel", 8), + GATE_INFRA3(CLK_INFRA_CCIF2_AP, "infra_ccif2_ap", + "axi_sel", 16), + GATE_INFRA3(CLK_INFRA_CCIF2_MD, "infra_ccif2_md", + "axi_sel", 17), + GATE_INFRA3(CLK_INFRA_CCIF3_AP, "infra_ccif3_ap", + "axi_sel", 18), + GATE_INFRA3(CLK_INFRA_CCIF3_MD, "infra_ccif3_md", + "axi_sel", 19), + GATE_INFRA3(CLK_INFRA_SEJ_F13M, "infra_sej_f13m", + "f_f26m_ck", 20), + GATE_INFRA3(CLK_INFRA_AES_BCLK, "infra_aes_bclk", + "axi_sel", 21), + GATE_INFRA3(CLK_INFRA_I2C7, "infra_i2c7", + "i2c_sel", 22), + GATE_INFRA3(CLK_INFRA_I2C8, "infra_i2c8", + "i2c_sel", 23), + GATE_INFRA3(CLK_INFRA_FBIST2FPC, "infra_fbist2fpc", + "msdc50_0_sel", 24), + GATE_INFRA3(CLK_INFRA_DPMAIF_CK, "infra_dpmaif", + "dpmaif_sel", 26), + GATE_INFRA3(CLK_INFRA_FADSP, "infra_fadsp", + "adsp_sel", 27), + GATE_INFRA3(CLK_INFRA_CCIF4_AP, "infra_ccif4_ap", + "axi_sel", 28), + GATE_INFRA3(CLK_INFRA_CCIF4_MD, "infra_ccif4_md", + "axi_sel", 29), + GATE_INFRA3(CLK_INFRA_SPI6, "infra_spi6", + "spi_sel", 30), + GATE_INFRA3(CLK_INFRA_SPI7, "infra_spi7", + "spi_sel", 31), +}; + +static const struct mtk_gate_regs apmixed_cg_regs = { + .set_ofs = 0x20, + .clr_ofs = 0x20, + .sta_ofs = 0x20, +}; + +#define GATE_APMIXED_FLAGS(_id, _name, _parent, _shift, _flags) \ + GATE_MTK_FLAGS(_id, _name, _parent, &apmixed_cg_regs, \ + _shift, &mtk_clk_gate_ops_no_setclr_inv, _flags) + +#define GATE_APMIXED(_id, _name, _parent, _shift) \ + GATE_APMIXED_FLAGS(_id, _name, _parent, _shift, 0) + +/* + * CRITICAL CLOCK: + * apmixed_appll26m is the toppest clock gate of all PLLs. 
+ */ +static const struct mtk_gate apmixed_clks[] = { + GATE_APMIXED(CLK_APMIXED_SSUSB26M, "apmixed_ssusb26m", + "f_f26m_ck", 4), + GATE_APMIXED_FLAGS(CLK_APMIXED_APPLL26M, "apmixed_appll26m", + "f_f26m_ck", 5, CLK_IS_CRITICAL), + GATE_APMIXED(CLK_APMIXED_MIPIC0_26M, "apmixed_mipic026m", + "f_f26m_ck", 6), + GATE_APMIXED(CLK_APMIXED_MDPLLGP26M, "apmixed_mdpll26m", + "f_f26m_ck", 7), + GATE_APMIXED(CLK_APMIXED_MM_F26M, "apmixed_mmsys26m", + "f_f26m_ck", 8), + GATE_APMIXED(CLK_APMIXED_UFS26M, "apmixed_ufs26m", + "f_f26m_ck", 9), + GATE_APMIXED(CLK_APMIXED_MIPIC1_26M, "apmixed_mipic126m", + "f_f26m_ck", 11), + GATE_APMIXED(CLK_APMIXED_MEMPLL26M, "apmixed_mempll26m", + "f_f26m_ck", 13), + GATE_APMIXED(CLK_APMIXED_CLKSQ_LVPLL_26M, "apmixed_lvpll26m", + "f_f26m_ck", 14), + GATE_APMIXED(CLK_APMIXED_MIPID0_26M, "apmixed_mipid026m", + "f_f26m_ck", 16), + GATE_APMIXED(CLK_APMIXED_MIPID1_26M, "apmixed_mipid126m", + "f_f26m_ck", 17), +}; + +#define MT6779_PLL_FMAX (3800UL * MHZ) +#define MT6779_PLL_FMIN (1500UL * MHZ) + +#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \ + _rst_bar_mask, _pcwbits, _pcwibits, _pd_reg, \ + _pd_shift, _tuner_reg, _tuner_en_reg, \ + _tuner_en_bit, _pcw_reg, _pcw_shift, \ + _pcw_chg_reg, _div_table) { \ + .id = _id, \ + .name = _name, \ + .reg = _reg, \ + .pwr_reg = _pwr_reg, \ + .en_mask = _en_mask, \ + .flags = _flags, \ + .rst_bar_mask = _rst_bar_mask, \ + .fmax = MT6779_PLL_FMAX, \ + .fmin = MT6779_PLL_FMIN, \ + .pcwbits = _pcwbits, \ + .pcwibits = _pcwibits, \ + .pd_reg = _pd_reg, \ + .pd_shift = _pd_shift, \ + .tuner_reg = _tuner_reg, \ + .tuner_en_reg = _tuner_en_reg, \ + .tuner_en_bit = _tuner_en_bit, \ + .pcw_reg = _pcw_reg, \ + .pcw_shift = _pcw_shift, \ + .pcw_chg_reg = _pcw_chg_reg, \ + .div_table = _div_table, \ + } + +#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \ + _rst_bar_mask, _pcwbits, _pcwibits, _pd_reg, \ + _pd_shift, _tuner_reg, _tuner_en_reg, \ + _tuner_en_bit, _pcw_reg, _pcw_shift, \ + _pcw_chg_reg) \ + PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \ + _rst_bar_mask, _pcwbits, _pcwibits, _pd_reg, \ + _pd_shift, _tuner_reg, _tuner_en_reg, \ + _tuner_en_bit, _pcw_reg, _pcw_shift, \ + _pcw_chg_reg, NULL) + +static const struct mtk_pll_data plls[] = { + PLL(CLK_APMIXED_ARMPLL_LL, "armpll_ll", 0x0200, 0x020C, BIT(0), + PLL_AO, 0, 22, 8, 0x0204, 24, 0, 0, 0, 0x0204, 0, 0), + PLL(CLK_APMIXED_ARMPLL_BL, "armpll_bl", 0x0210, 0x021C, BIT(0), + PLL_AO, 0, 22, 8, 0x0214, 24, 0, 0, 0, 0x0214, 0, 0), + PLL(CLK_APMIXED_CCIPLL, "ccipll", 0x02A0, 0x02AC, BIT(0), + PLL_AO, 0, 22, 8, 0x02A4, 24, 0, 0, 0, 0x02A4, 0, 0), + PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0230, 0x023C, BIT(0), + (HAVE_RST_BAR), BIT(24), 22, 8, 0x0234, 24, 0, 0, 0, + 0x0234, 0, 0), + PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0240, 0x024C, BIT(0), + (HAVE_RST_BAR), BIT(24), 22, 8, 0x0244, 24, + 0, 0, 0, 0x0244, 0, 0), + PLL(CLK_APMIXED_MFGPLL, "mfgpll", 0x0250, 0x025C, BIT(0), + 0, 0, 22, 8, 0x0254, 24, 0, 0, 0, 0x0254, 0, 0), + PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0260, 0x026C, BIT(0), + 0, 0, 22, 8, 0x0264, 24, 0, 0, 0, 0x0264, 0, 0), + PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0270, 0x027C, BIT(0), + 0, 0, 22, 8, 0x0274, 24, 0, 0, 0, 0x0274, 0, 0), + PLL(CLK_APMIXED_ADSPPLL, "adsppll", 0x02b0, 0x02bC, BIT(0), + (HAVE_RST_BAR), BIT(23), 22, 8, 0x02b4, 24, + 0, 0, 0, 0x02b4, 0, 0), + PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0280, 0x028C, BIT(0), + (HAVE_RST_BAR), BIT(23), 22, 8, 0x0284, 24, + 0, 0, 0, 0x0284, 0, 0), + PLL(CLK_APMIXED_APLL1, "apll1", 0x02C0, 0x02D0, BIT(0), + 0, 0, 
32, 8, 0x02C0, 1, 0, 0x14, 0, 0x02C4, 0, 0x2C0), + PLL(CLK_APMIXED_APLL2, "apll2", 0x02D4, 0x02E4, BIT(0), + 0, 0, 32, 8, 0x02D4, 1, 0, 0x14, 1, 0x02D8, 0, 0x02D4), +}; + +static int clk_mt6779_apmixed_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK); + + mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data); + + mtk_clk_register_gates(node, apmixed_clks, + ARRAY_SIZE(apmixed_clks), clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static int clk_mt6779_top_probe(struct platform_device *pdev) +{ + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + void __iomem *base; + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK); + + mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), + clk_data); + + mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data); + + mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), + node, &mt6779_clk_lock, clk_data); + + mtk_clk_register_composites(top_aud_muxes, ARRAY_SIZE(top_aud_muxes), + base, &mt6779_clk_lock, clk_data); + + mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs), + base, &mt6779_clk_lock, clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static int clk_mt6779_infra_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK); + + mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks), + clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static const struct of_device_id of_match_clk_mt6779[] = { + { + .compatible = "mediatek,mt6779-apmixed", + .data = clk_mt6779_apmixed_probe, + }, { + .compatible = "mediatek,mt6779-topckgen", + .data = clk_mt6779_top_probe, + }, { + .compatible = "mediatek,mt6779-infracfg_ao", + .data = clk_mt6779_infra_probe, + }, { + /* sentinel */ + } +}; + +static int clk_mt6779_probe(struct platform_device *pdev) +{ + int (*clk_probe)(struct platform_device *pdev); + int r; + + clk_probe = of_device_get_match_data(&pdev->dev); + if (!clk_probe) + return -EINVAL; + + r = clk_probe(pdev); + if (r) + dev_err(&pdev->dev, + "could not register clock provider: %s: %d\n", + pdev->name, r); + + return r; +} + +static struct platform_driver clk_mt6779_drv = { + .probe = clk_mt6779_probe, + .driver = { + .name = "clk-mt6779", + .of_match_table = of_match_clk_mt6779, + }, +}; + +static int __init clk_mt6779_init(void) +{ + return platform_driver_register(&clk_mt6779_drv); +} + +arch_initcall(clk_mt6779_init); diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c index 99a6b020833e7b..37b4162c588203 100644 --- a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c +++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c @@ -5,6 +5,7 @@ #include #include +#include #include "clk-mtk.h" #include "clk-gate.h" @@ -30,10 +31,12 @@ static int clk_mt8183_mfg_probe(struct platform_device *pdev) struct clk_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; + pm_runtime_enable(&pdev->dev); + clk_data = mtk_alloc_clk_data(CLK_MFG_NR_CLK); - mtk_clk_register_gates(node, mfg_clks, 
ARRAY_SIZE(mfg_clks), - clk_data); + mtk_clk_register_gates_with_dev(node, mfg_clks, ARRAY_SIZE(mfg_clks), + clk_data, &pdev->dev); return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); } diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c index 73b7e238eee75e..51c8d5c9a030fd 100644 --- a/drivers/clk/mediatek/clk-mt8183.c +++ b/drivers/clk/mediatek/clk-mt8183.c @@ -17,6 +17,9 @@ #include +/* Infra global controller reset set register */ +#define INFRA_RST0_SET_OFFSET 0x120 + static DEFINE_SPINLOCK(mt8183_clk_lock); static const struct mtk_fixed_clk top_fixed_clks[] = { @@ -1001,6 +1004,20 @@ static const struct mtk_gate infra_clks[] = { "msdc50_0_sel", 24), }; +static const struct mtk_gate_regs peri_cg_regs = { + .set_ofs = 0x20c, + .clr_ofs = 0x20c, + .sta_ofs = 0x20c, +}; + +#define GATE_PERI(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &peri_cg_regs, _shift, \ + &mtk_clk_gate_ops_no_setclr_inv) + +static const struct mtk_gate peri_clks[] = { + GATE_PERI(CLK_PERI_AXI, "peri_axi", "axi_sel", 31), +}; + static const struct mtk_gate_regs apmixed_cg_regs = { .set_ofs = 0x20, .clr_ofs = 0x20, @@ -1207,12 +1224,36 @@ static int clk_mt8183_infra_probe(struct platform_device *pdev) { struct clk_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; + int r; clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK); mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks), clk_data); + r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + if (r) { + dev_err(&pdev->dev, + "%s(): could not register clock provider: %d\n", + __func__, r); + return r; + } + + mtk_register_reset_controller_set_clr(node, 4, INFRA_RST0_SET_OFFSET); + + return r; +} + +static int clk_mt8183_peri_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK); + + mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks), + clk_data); + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); } @@ -1245,6 +1286,9 @@ static const struct of_device_id of_match_clk_mt8183[] = { }, { .compatible = "mediatek,mt8183-infracfg", .data = clk_mt8183_infra_probe, + }, { + .compatible = "mediatek,mt8183-pericfg", + .data = clk_mt8183_peri_probe, }, { .compatible = "mediatek,mt8183-mcucfg", .data = clk_mt8183_mcu_probe, diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c index d28790c7491920..cec1c8a2721135 100644 --- a/drivers/clk/mediatek/clk-mtk.c +++ b/drivers/clk/mediatek/clk-mtk.c @@ -12,6 +12,7 @@ #include #include #include +#include #include "clk-mtk.h" #include "clk-gate.h" @@ -93,9 +94,10 @@ void mtk_clk_register_factors(const struct mtk_fixed_factor *clks, } } -int mtk_clk_register_gates(struct device_node *node, +int mtk_clk_register_gates_with_dev(struct device_node *node, const struct mtk_gate *clks, - int num, struct clk_onecell_data *clk_data) + int num, struct clk_onecell_data *clk_data, + struct device *dev) { int i; struct clk *clk; @@ -122,7 +124,7 @@ int mtk_clk_register_gates(struct device_node *node, gate->regs->set_ofs, gate->regs->clr_ofs, gate->regs->sta_ofs, - gate->shift, gate->ops, gate->flags); + gate->shift, gate->ops, gate->flags, dev); if (IS_ERR(clk)) { pr_err("Failed to register clk %s: %ld\n", @@ -136,6 +138,14 @@ int mtk_clk_register_gates(struct device_node *node, return 0; } +int mtk_clk_register_gates(struct device_node *node, + const struct mtk_gate *clks, + int 
num, struct clk_onecell_data *clk_data) +{ + return mtk_clk_register_gates_with_dev(node, + clks, num, clk_data, NULL); +} + struct clk *mtk_clk_register_composite(const struct mtk_composite *mc, void __iomem *base, spinlock_t *lock) { diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h index 733a11d1de9461..c3d6756b0c7e75 100644 --- a/drivers/clk/mediatek/clk-mtk.h +++ b/drivers/clk/mediatek/clk-mtk.h @@ -169,6 +169,11 @@ int mtk_clk_register_gates(struct device_node *node, const struct mtk_gate *clks, int num, struct clk_onecell_data *clk_data); +int mtk_clk_register_gates_with_dev(struct device_node *node, + const struct mtk_gate *clks, + int num, struct clk_onecell_data *clk_data, + struct device *dev); + struct mtk_clk_divider { int id; const char *name; @@ -240,4 +245,7 @@ struct clk *mtk_clk_register_ref2usb_tx(const char *name, void mtk_register_reset_controller(struct device_node *np, unsigned int num_regs, int regofs); +void mtk_register_reset_controller_set_clr(struct device_node *np, + unsigned int num_regs, int regofs); + #endif /* __DRV_CLK_MTK_H */ diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c index d8376b92349ee7..cb939c071b0cca 100644 --- a/drivers/clk/mediatek/reset.c +++ b/drivers/clk/mediatek/reset.c @@ -19,6 +19,24 @@ struct mtk_reset { struct reset_controller_dev rcdev; }; +static int mtk_reset_assert_set_clr(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev); + unsigned int reg = data->regofs + ((id / 32) << 4); + + return regmap_write(data->regmap, reg, 1); +} + +static int mtk_reset_deassert_set_clr(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev); + unsigned int reg = data->regofs + ((id / 32) << 4) + 0x4; + + return regmap_write(data->regmap, reg, 1); +} + static int mtk_reset_assert(struct reset_controller_dev *rcdev, unsigned long id) { @@ -49,14 +67,32 @@ static int mtk_reset(struct reset_controller_dev *rcdev, return mtk_reset_deassert(rcdev, id); } +static int mtk_reset_set_clr(struct reset_controller_dev *rcdev, + unsigned long id) +{ + int ret; + + ret = mtk_reset_assert_set_clr(rcdev, id); + if (ret) + return ret; + return mtk_reset_deassert_set_clr(rcdev, id); +} + static const struct reset_control_ops mtk_reset_ops = { .assert = mtk_reset_assert, .deassert = mtk_reset_deassert, .reset = mtk_reset, }; -void mtk_register_reset_controller(struct device_node *np, - unsigned int num_regs, int regofs) +static const struct reset_control_ops mtk_reset_ops_set_clr = { + .assert = mtk_reset_assert_set_clr, + .deassert = mtk_reset_deassert_set_clr, + .reset = mtk_reset_set_clr, +}; + +static void mtk_register_reset_controller_common(struct device_node *np, + unsigned int num_regs, int regofs, + const struct reset_control_ops *reset_ops) { struct mtk_reset *data; int ret; @@ -77,7 +113,7 @@ void mtk_register_reset_controller(struct device_node *np, data->regofs = regofs; data->rcdev.owner = THIS_MODULE; data->rcdev.nr_resets = num_regs * 32; - data->rcdev.ops = &mtk_reset_ops; + data->rcdev.ops = reset_ops; data->rcdev.of_node = np; ret = reset_controller_register(&data->rcdev); @@ -87,3 +123,17 @@ void mtk_register_reset_controller(struct device_node *np, return; } } + +void mtk_register_reset_controller(struct device_node *np, + unsigned int num_regs, int regofs) +{ + mtk_register_reset_controller_common(np, num_regs, regofs, + &mtk_reset_ops); +} + 
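/*
 * Editor's note -- not part of the patch: a minimal standalone sketch of the
 * register math used by mtk_reset_assert_set_clr() and
 * mtk_reset_deassert_set_clr() above. Each bank of 32 reset lines occupies a
 * 0x10-byte stride; the SET register sits at the bank base and the CLR
 * register 4 bytes after it. The MT8183 infracfg values (base offset 0x120,
 * 4 banks) are taken from the clk-mt8183.c hunk above; everything else here
 * is illustrative only.
 */
#include <stdio.h>

static unsigned int set_reg(unsigned int regofs, unsigned long id)
{
	return regofs + ((id / 32) << 4);	/* bank base = SET register */
}

static unsigned int clr_reg(unsigned int regofs, unsigned long id)
{
	return set_reg(regofs, id) + 0x4;	/* CLR register follows SET */
}

int main(void)
{
	const unsigned int infra_rst0_set = 0x120;	/* INFRA_RST0_SET_OFFSET */
	unsigned long id;

	/* 4 banks * 32 lines, matching num_regs = 4 in the mt8183 probe */
	for (id = 0; id < 4 * 32; id += 40)
		printf("reset %lu: set 0x%x, clr 0x%x\n",
		       id, set_reg(infra_rst0_set, id),
		       clr_reg(infra_rst0_set, id));

	return 0;
}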
+void mtk_register_reset_controller_set_clr(struct device_node *np, + unsigned int num_regs, int regofs) +{ + mtk_register_reset_controller_common(np, num_regs, regofs, + &mtk_reset_ops_set_clr); +} diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c index 741df7e955ca07..18b23cdf679c07 100644 --- a/drivers/clk/meson/axg-audio.c +++ b/drivers/clk/meson/axg-audio.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include "axg-audio.h" @@ -918,6 +919,84 @@ static int devm_clk_get_enable(struct device *dev, char *id) return 0; } +struct axg_audio_reset_data { + struct reset_controller_dev rstc; + struct regmap *map; + unsigned int offset; +}; + +static void axg_audio_reset_reg_and_bit(struct axg_audio_reset_data *rst, + unsigned long id, + unsigned int *reg, + unsigned int *bit) +{ + unsigned int stride = regmap_get_reg_stride(rst->map); + + *reg = (id / (stride * BITS_PER_BYTE)) * stride; + *reg += rst->offset; + *bit = id % (stride * BITS_PER_BYTE); +} + +static int axg_audio_reset_update(struct reset_controller_dev *rcdev, + unsigned long id, bool assert) +{ + struct axg_audio_reset_data *rst = + container_of(rcdev, struct axg_audio_reset_data, rstc); + unsigned int offset, bit; + + axg_audio_reset_reg_and_bit(rst, id, &offset, &bit); + + regmap_update_bits(rst->map, offset, BIT(bit), + assert ? BIT(bit) : 0); + + return 0; +} + +static int axg_audio_reset_status(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct axg_audio_reset_data *rst = + container_of(rcdev, struct axg_audio_reset_data, rstc); + unsigned int val, offset, bit; + + axg_audio_reset_reg_and_bit(rst, id, &offset, &bit); + + regmap_read(rst->map, offset, &val); + + return !!(val & BIT(bit)); +} + +static int axg_audio_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return axg_audio_reset_update(rcdev, id, true); +} + +static int axg_audio_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return axg_audio_reset_update(rcdev, id, false); +} + +static int axg_audio_reset_toggle(struct reset_controller_dev *rcdev, + unsigned long id) +{ + int ret; + + ret = axg_audio_reset_assert(rcdev, id); + if (ret) + return ret; + + return axg_audio_reset_deassert(rcdev, id); +} + +static const struct reset_control_ops axg_audio_rstc_ops = { + .assert = axg_audio_reset_assert, + .deassert = axg_audio_reset_deassert, + .reset = axg_audio_reset_toggle, + .status = axg_audio_reset_status, +}; + static const struct regmap_config axg_audio_regmap_cfg = { .reg_bits = 32, .val_bits = 32, @@ -927,12 +1006,15 @@ static const struct regmap_config axg_audio_regmap_cfg = { struct audioclk_data { struct clk_hw_onecell_data *hw_onecell_data; + unsigned int reset_offset; + unsigned int reset_num; }; static int axg_audio_clkc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; const struct audioclk_data *data; + struct axg_audio_reset_data *rst; struct regmap *map; struct resource *res; void __iomem *regs; @@ -971,21 +1053,43 @@ static int axg_audio_clkc_probe(struct platform_device *pdev) /* Take care to skip the registered input clocks */ for (i = AUD_CLKID_DDR_ARB; i < data->hw_onecell_data->num; i++) { + const char *name; + hw = data->hw_onecell_data->hws[i]; /* array might be sparse */ if (!hw) continue; + name = hw->init->name; + ret = devm_clk_hw_register(dev, hw); if (ret) { - dev_err(dev, "failed to register clock %s\n", - hw->init->name); + dev_err(dev, "failed to register clock %s\n", name); return ret; } } - return 
devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, - data->hw_onecell_data); + ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, + data->hw_onecell_data); + if (ret) + return ret; + + /* Stop here if there is no reset */ + if (!data->reset_num) + return 0; + + rst = devm_kzalloc(dev, sizeof(*rst), GFP_KERNEL); + if (!rst) + return -ENOMEM; + + rst->map = map; + rst->offset = data->reset_offset; + rst->rstc.nr_resets = data->reset_num; + rst->rstc.ops = &axg_audio_rstc_ops; + rst->rstc.of_node = dev->of_node; + rst->rstc.owner = THIS_MODULE; + + return devm_reset_controller_register(dev, &rst->rstc); } static const struct audioclk_data axg_audioclk_data = { @@ -994,6 +1098,8 @@ static const struct audioclk_data axg_audioclk_data = { static const struct audioclk_data g12a_audioclk_data = { .hw_onecell_data = &g12a_audio_hw_onecell_data, + .reset_offset = AUDIO_SW_RESET, + .reset_num = 26, }; static const struct of_device_id clkc_match_table[] = { diff --git a/drivers/clk/meson/axg-audio.h b/drivers/clk/meson/axg-audio.h index 5d972d55d6c741..c00e28b2e1a962 100644 --- a/drivers/clk/meson/axg-audio.h +++ b/drivers/clk/meson/axg-audio.h @@ -22,6 +22,7 @@ #define AUDIO_MCLK_F_CTRL 0x018 #define AUDIO_MST_PAD_CTRL0 0x01c #define AUDIO_MST_PAD_CTRL1 0x020 +#define AUDIO_SW_RESET 0x024 #define AUDIO_MST_A_SCLK_CTRL0 0x040 #define AUDIO_MST_A_SCLK_CTRL1 0x044 #define AUDIO_MST_B_SCLK_CTRL0 0x048 diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c index c3f0ffc3280dee..ea4c791f106ddb 100644 --- a/drivers/clk/meson/g12a.c +++ b/drivers/clk/meson/g12a.c @@ -676,6 +676,226 @@ static struct clk_regmap g12b_cpub_clk = { }, }; +static struct clk_regmap sm1_gp1_pll; + +/* Datasheet names this field as "premux0" */ +static struct clk_regmap sm1_dsu_clk_premux0 = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPU_CLK_CNTL5, + .mask = 0x3, + .shift = 0, + }, + .hw.init = &(struct clk_init_data){ + .name = "dsu_clk_dyn0_sel", + .ops = &clk_regmap_mux_ro_ops, + .parent_data = (const struct clk_parent_data []) { + { .fw_name = "xtal", }, + { .hw = &g12a_fclk_div2.hw }, + { .hw = &g12a_fclk_div3.hw }, + { .hw = &sm1_gp1_pll.hw }, + }, + .num_parents = 4, + }, +}; + +/* Datasheet names this field as "premux1" */ +static struct clk_regmap sm1_dsu_clk_premux1 = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPU_CLK_CNTL5, + .mask = 0x3, + .shift = 16, + }, + .hw.init = &(struct clk_init_data){ + .name = "dsu_clk_dyn1_sel", + .ops = &clk_regmap_mux_ro_ops, + .parent_data = (const struct clk_parent_data []) { + { .fw_name = "xtal", }, + { .hw = &g12a_fclk_div2.hw }, + { .hw = &g12a_fclk_div3.hw }, + { .hw = &sm1_gp1_pll.hw }, + }, + .num_parents = 4, + }, +}; + +/* Datasheet names this field as "Mux0_divn_tcnt" */ +static struct clk_regmap sm1_dsu_clk_mux0_div = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_SYS_CPU_CLK_CNTL5, + .shift = 4, + .width = 6, + }, + .hw.init = &(struct clk_init_data){ + .name = "dsu_clk_dyn0_div", + .ops = &clk_regmap_divider_ro_ops, + .parent_hws = (const struct clk_hw *[]) { + &sm1_dsu_clk_premux0.hw + }, + .num_parents = 1, + }, +}; + +/* Datasheet names this field as "postmux0" */ +static struct clk_regmap sm1_dsu_clk_postmux0 = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPU_CLK_CNTL5, + .mask = 0x1, + .shift = 2, + }, + .hw.init = &(struct clk_init_data){ + .name = "dsu_clk_dyn0", + .ops = &clk_regmap_mux_ro_ops, + .parent_hws = (const struct clk_hw *[]) { + &sm1_dsu_clk_premux0.hw, + 
&sm1_dsu_clk_mux0_div.hw, + }, + .num_parents = 2, + }, +}; + +/* Datasheet names this field as "Mux1_divn_tcnt" */ +static struct clk_regmap sm1_dsu_clk_mux1_div = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_SYS_CPU_CLK_CNTL5, + .shift = 20, + .width = 6, + }, + .hw.init = &(struct clk_init_data){ + .name = "dsu_clk_dyn1_div", + .ops = &clk_regmap_divider_ro_ops, + .parent_hws = (const struct clk_hw *[]) { + &sm1_dsu_clk_premux1.hw + }, + .num_parents = 1, + }, +}; + +/* Datasheet names this field as "postmux1" */ +static struct clk_regmap sm1_dsu_clk_postmux1 = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPU_CLK_CNTL5, + .mask = 0x1, + .shift = 18, + }, + .hw.init = &(struct clk_init_data){ + .name = "dsu_clk_dyn1", + .ops = &clk_regmap_mux_ro_ops, + .parent_hws = (const struct clk_hw *[]) { + &sm1_dsu_clk_premux1.hw, + &sm1_dsu_clk_mux1_div.hw, + }, + .num_parents = 2, + }, +}; + +/* Datasheet names this field as "Final_dyn_mux_sel" */ +static struct clk_regmap sm1_dsu_clk_dyn = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPU_CLK_CNTL5, + .mask = 0x1, + .shift = 10, + }, + .hw.init = &(struct clk_init_data){ + .name = "dsu_clk_dyn", + .ops = &clk_regmap_mux_ro_ops, + .parent_hws = (const struct clk_hw *[]) { + &sm1_dsu_clk_postmux0.hw, + &sm1_dsu_clk_postmux1.hw, + }, + .num_parents = 2, + }, +}; + +/* Datasheet names this field as "Final_mux_sel" */ +static struct clk_regmap sm1_dsu_final_clk = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPU_CLK_CNTL5, + .mask = 0x1, + .shift = 11, + }, + .hw.init = &(struct clk_init_data){ + .name = "dsu_clk_final", + .ops = &clk_regmap_mux_ro_ops, + .parent_hws = (const struct clk_hw *[]) { + &sm1_dsu_clk_dyn.hw, + &g12a_sys_pll.hw, + }, + .num_parents = 2, + }, +}; + +/* Datasheet names this field as "Cpu_clk_sync_mux_sel" bit 0 */ +static struct clk_regmap sm1_cpu1_clk = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPU_CLK_CNTL6, + .mask = 0x1, + .shift = 24, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpu1_clk", + .ops = &clk_regmap_mux_ro_ops, + .parent_hws = (const struct clk_hw *[]) { + &g12a_cpu_clk.hw, + /* This CPU also have a dedicated clock tree */ + }, + .num_parents = 1, + }, +}; + +/* Datasheet names this field as "Cpu_clk_sync_mux_sel" bit 1 */ +static struct clk_regmap sm1_cpu2_clk = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPU_CLK_CNTL6, + .mask = 0x1, + .shift = 25, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpu2_clk", + .ops = &clk_regmap_mux_ro_ops, + .parent_hws = (const struct clk_hw *[]) { + &g12a_cpu_clk.hw, + /* This CPU also have a dedicated clock tree */ + }, + .num_parents = 1, + }, +}; + +/* Datasheet names this field as "Cpu_clk_sync_mux_sel" bit 2 */ +static struct clk_regmap sm1_cpu3_clk = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPU_CLK_CNTL6, + .mask = 0x1, + .shift = 26, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpu3_clk", + .ops = &clk_regmap_mux_ro_ops, + .parent_hws = (const struct clk_hw *[]) { + &g12a_cpu_clk.hw, + /* This CPU also have a dedicated clock tree */ + }, + .num_parents = 1, + }, +}; + +/* Datasheet names this field as "Cpu_clk_sync_mux_sel" bit 4 */ +static struct clk_regmap sm1_dsu_clk = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPU_CLK_CNTL6, + .mask = 0x1, + .shift = 27, + }, + .hw.init = &(struct clk_init_data){ + .name = "dsu_clk", + .ops = &clk_regmap_mux_ro_ops, + .parent_hws = (const struct clk_hw *[]) { + 
&g12a_cpu_clk.hw, + &sm1_dsu_final_clk.hw, + }, + .num_parents = 2, + }, +}; + static int g12a_cpu_clk_mux_notifier_cb(struct notifier_block *nb, unsigned long event, void *data) { @@ -1443,6 +1663,69 @@ static struct clk_regmap g12a_gp0_pll = { }, }; +static struct clk_regmap sm1_gp1_pll_dco = { + .data = &(struct meson_clk_pll_data){ + .en = { + .reg_off = HHI_GP1_PLL_CNTL0, + .shift = 28, + .width = 1, + }, + .m = { + .reg_off = HHI_GP1_PLL_CNTL0, + .shift = 0, + .width = 8, + }, + .n = { + .reg_off = HHI_GP1_PLL_CNTL0, + .shift = 10, + .width = 5, + }, + .frac = { + .reg_off = HHI_GP1_PLL_CNTL1, + .shift = 0, + .width = 17, + }, + .l = { + .reg_off = HHI_GP1_PLL_CNTL0, + .shift = 31, + .width = 1, + }, + .rst = { + .reg_off = HHI_GP1_PLL_CNTL0, + .shift = 29, + .width = 1, + }, + }, + .hw.init = &(struct clk_init_data){ + .name = "gp1_pll_dco", + .ops = &meson_clk_pll_ro_ops, + .parent_data = &(const struct clk_parent_data) { + .fw_name = "xtal", + }, + .num_parents = 1, + /* This clock feeds the DSU, avoid disabling it */ + .flags = CLK_IS_CRITICAL, + }, +}; + +static struct clk_regmap sm1_gp1_pll = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_GP1_PLL_CNTL0, + .shift = 16, + .width = 3, + .flags = (CLK_DIVIDER_POWER_OF_TWO | + CLK_DIVIDER_ROUND_CLOSEST), + }, + .hw.init = &(struct clk_init_data){ + .name = "gp1_pll", + .ops = &clk_regmap_divider_ro_ops, + .parent_hws = (const struct clk_hw *[]) { + &sm1_gp1_pll_dco.hw + }, + .num_parents = 1, + }, +}; + /* * Internal hifi pll emulation configuration parameters */ @@ -4121,6 +4404,240 @@ static struct clk_hw_onecell_data g12b_hw_onecell_data = { .num = NR_CLKS, }; +static struct clk_hw_onecell_data sm1_hw_onecell_data = { + .hws = { + [CLKID_SYS_PLL] = &g12a_sys_pll.hw, + [CLKID_FIXED_PLL] = &g12a_fixed_pll.hw, + [CLKID_FCLK_DIV2] = &g12a_fclk_div2.hw, + [CLKID_FCLK_DIV3] = &g12a_fclk_div3.hw, + [CLKID_FCLK_DIV4] = &g12a_fclk_div4.hw, + [CLKID_FCLK_DIV5] = &g12a_fclk_div5.hw, + [CLKID_FCLK_DIV7] = &g12a_fclk_div7.hw, + [CLKID_FCLK_DIV2P5] = &g12a_fclk_div2p5.hw, + [CLKID_GP0_PLL] = &g12a_gp0_pll.hw, + [CLKID_MPEG_SEL] = &g12a_mpeg_clk_sel.hw, + [CLKID_MPEG_DIV] = &g12a_mpeg_clk_div.hw, + [CLKID_CLK81] = &g12a_clk81.hw, + [CLKID_MPLL0] = &g12a_mpll0.hw, + [CLKID_MPLL1] = &g12a_mpll1.hw, + [CLKID_MPLL2] = &g12a_mpll2.hw, + [CLKID_MPLL3] = &g12a_mpll3.hw, + [CLKID_DDR] = &g12a_ddr.hw, + [CLKID_DOS] = &g12a_dos.hw, + [CLKID_AUDIO_LOCKER] = &g12a_audio_locker.hw, + [CLKID_MIPI_DSI_HOST] = &g12a_mipi_dsi_host.hw, + [CLKID_ETH_PHY] = &g12a_eth_phy.hw, + [CLKID_ISA] = &g12a_isa.hw, + [CLKID_PL301] = &g12a_pl301.hw, + [CLKID_PERIPHS] = &g12a_periphs.hw, + [CLKID_SPICC0] = &g12a_spicc_0.hw, + [CLKID_I2C] = &g12a_i2c.hw, + [CLKID_SANA] = &g12a_sana.hw, + [CLKID_SD] = &g12a_sd.hw, + [CLKID_RNG0] = &g12a_rng0.hw, + [CLKID_UART0] = &g12a_uart0.hw, + [CLKID_SPICC1] = &g12a_spicc_1.hw, + [CLKID_HIU_IFACE] = &g12a_hiu_reg.hw, + [CLKID_MIPI_DSI_PHY] = &g12a_mipi_dsi_phy.hw, + [CLKID_ASSIST_MISC] = &g12a_assist_misc.hw, + [CLKID_SD_EMMC_A] = &g12a_emmc_a.hw, + [CLKID_SD_EMMC_B] = &g12a_emmc_b.hw, + [CLKID_SD_EMMC_C] = &g12a_emmc_c.hw, + [CLKID_AUDIO_CODEC] = &g12a_audio_codec.hw, + [CLKID_AUDIO] = &g12a_audio.hw, + [CLKID_ETH] = &g12a_eth_core.hw, + [CLKID_DEMUX] = &g12a_demux.hw, + [CLKID_AUDIO_IFIFO] = &g12a_audio_ififo.hw, + [CLKID_ADC] = &g12a_adc.hw, + [CLKID_UART1] = &g12a_uart1.hw, + [CLKID_G2D] = &g12a_g2d.hw, + [CLKID_RESET] = &g12a_reset.hw, + [CLKID_PCIE_COMB] = &g12a_pcie_comb.hw, + [CLKID_PARSER] = &g12a_parser.hw, + 
[CLKID_USB] = &g12a_usb_general.hw, + [CLKID_PCIE_PHY] = &g12a_pcie_phy.hw, + [CLKID_AHB_ARB0] = &g12a_ahb_arb0.hw, + [CLKID_AHB_DATA_BUS] = &g12a_ahb_data_bus.hw, + [CLKID_AHB_CTRL_BUS] = &g12a_ahb_ctrl_bus.hw, + [CLKID_HTX_HDCP22] = &g12a_htx_hdcp22.hw, + [CLKID_HTX_PCLK] = &g12a_htx_pclk.hw, + [CLKID_BT656] = &g12a_bt656.hw, + [CLKID_USB1_DDR_BRIDGE] = &g12a_usb1_to_ddr.hw, + [CLKID_MMC_PCLK] = &g12a_mmc_pclk.hw, + [CLKID_UART2] = &g12a_uart2.hw, + [CLKID_VPU_INTR] = &g12a_vpu_intr.hw, + [CLKID_GIC] = &g12a_gic.hw, + [CLKID_SD_EMMC_A_CLK0_SEL] = &g12a_sd_emmc_a_clk0_sel.hw, + [CLKID_SD_EMMC_A_CLK0_DIV] = &g12a_sd_emmc_a_clk0_div.hw, + [CLKID_SD_EMMC_A_CLK0] = &g12a_sd_emmc_a_clk0.hw, + [CLKID_SD_EMMC_B_CLK0_SEL] = &g12a_sd_emmc_b_clk0_sel.hw, + [CLKID_SD_EMMC_B_CLK0_DIV] = &g12a_sd_emmc_b_clk0_div.hw, + [CLKID_SD_EMMC_B_CLK0] = &g12a_sd_emmc_b_clk0.hw, + [CLKID_SD_EMMC_C_CLK0_SEL] = &g12a_sd_emmc_c_clk0_sel.hw, + [CLKID_SD_EMMC_C_CLK0_DIV] = &g12a_sd_emmc_c_clk0_div.hw, + [CLKID_SD_EMMC_C_CLK0] = &g12a_sd_emmc_c_clk0.hw, + [CLKID_MPLL0_DIV] = &g12a_mpll0_div.hw, + [CLKID_MPLL1_DIV] = &g12a_mpll1_div.hw, + [CLKID_MPLL2_DIV] = &g12a_mpll2_div.hw, + [CLKID_MPLL3_DIV] = &g12a_mpll3_div.hw, + [CLKID_FCLK_DIV2_DIV] = &g12a_fclk_div2_div.hw, + [CLKID_FCLK_DIV3_DIV] = &g12a_fclk_div3_div.hw, + [CLKID_FCLK_DIV4_DIV] = &g12a_fclk_div4_div.hw, + [CLKID_FCLK_DIV5_DIV] = &g12a_fclk_div5_div.hw, + [CLKID_FCLK_DIV7_DIV] = &g12a_fclk_div7_div.hw, + [CLKID_FCLK_DIV2P5_DIV] = &g12a_fclk_div2p5_div.hw, + [CLKID_HIFI_PLL] = &g12a_hifi_pll.hw, + [CLKID_VCLK2_VENCI0] = &g12a_vclk2_venci0.hw, + [CLKID_VCLK2_VENCI1] = &g12a_vclk2_venci1.hw, + [CLKID_VCLK2_VENCP0] = &g12a_vclk2_vencp0.hw, + [CLKID_VCLK2_VENCP1] = &g12a_vclk2_vencp1.hw, + [CLKID_VCLK2_VENCT0] = &g12a_vclk2_venct0.hw, + [CLKID_VCLK2_VENCT1] = &g12a_vclk2_venct1.hw, + [CLKID_VCLK2_OTHER] = &g12a_vclk2_other.hw, + [CLKID_VCLK2_ENCI] = &g12a_vclk2_enci.hw, + [CLKID_VCLK2_ENCP] = &g12a_vclk2_encp.hw, + [CLKID_DAC_CLK] = &g12a_dac_clk.hw, + [CLKID_AOCLK] = &g12a_aoclk_gate.hw, + [CLKID_IEC958] = &g12a_iec958_gate.hw, + [CLKID_ENC480P] = &g12a_enc480p.hw, + [CLKID_RNG1] = &g12a_rng1.hw, + [CLKID_VCLK2_ENCT] = &g12a_vclk2_enct.hw, + [CLKID_VCLK2_ENCL] = &g12a_vclk2_encl.hw, + [CLKID_VCLK2_VENCLMMC] = &g12a_vclk2_venclmmc.hw, + [CLKID_VCLK2_VENCL] = &g12a_vclk2_vencl.hw, + [CLKID_VCLK2_OTHER1] = &g12a_vclk2_other1.hw, + [CLKID_FIXED_PLL_DCO] = &g12a_fixed_pll_dco.hw, + [CLKID_SYS_PLL_DCO] = &g12a_sys_pll_dco.hw, + [CLKID_GP0_PLL_DCO] = &g12a_gp0_pll_dco.hw, + [CLKID_HIFI_PLL_DCO] = &g12a_hifi_pll_dco.hw, + [CLKID_DMA] = &g12a_dma.hw, + [CLKID_EFUSE] = &g12a_efuse.hw, + [CLKID_ROM_BOOT] = &g12a_rom_boot.hw, + [CLKID_RESET_SEC] = &g12a_reset_sec.hw, + [CLKID_SEC_AHB_APB3] = &g12a_sec_ahb_apb3.hw, + [CLKID_MPLL_PREDIV] = &g12a_mpll_prediv.hw, + [CLKID_VPU_0_SEL] = &g12a_vpu_0_sel.hw, + [CLKID_VPU_0_DIV] = &g12a_vpu_0_div.hw, + [CLKID_VPU_0] = &g12a_vpu_0.hw, + [CLKID_VPU_1_SEL] = &g12a_vpu_1_sel.hw, + [CLKID_VPU_1_DIV] = &g12a_vpu_1_div.hw, + [CLKID_VPU_1] = &g12a_vpu_1.hw, + [CLKID_VPU] = &g12a_vpu.hw, + [CLKID_VAPB_0_SEL] = &g12a_vapb_0_sel.hw, + [CLKID_VAPB_0_DIV] = &g12a_vapb_0_div.hw, + [CLKID_VAPB_0] = &g12a_vapb_0.hw, + [CLKID_VAPB_1_SEL] = &g12a_vapb_1_sel.hw, + [CLKID_VAPB_1_DIV] = &g12a_vapb_1_div.hw, + [CLKID_VAPB_1] = &g12a_vapb_1.hw, + [CLKID_VAPB_SEL] = &g12a_vapb_sel.hw, + [CLKID_VAPB] = &g12a_vapb.hw, + [CLKID_HDMI_PLL_DCO] = &g12a_hdmi_pll_dco.hw, + [CLKID_HDMI_PLL_OD] = &g12a_hdmi_pll_od.hw, + [CLKID_HDMI_PLL_OD2] = &g12a_hdmi_pll_od2.hw, 
+ [CLKID_HDMI_PLL] = &g12a_hdmi_pll.hw, + [CLKID_VID_PLL] = &g12a_vid_pll_div.hw, + [CLKID_VID_PLL_SEL] = &g12a_vid_pll_sel.hw, + [CLKID_VID_PLL_DIV] = &g12a_vid_pll.hw, + [CLKID_VCLK_SEL] = &g12a_vclk_sel.hw, + [CLKID_VCLK2_SEL] = &g12a_vclk2_sel.hw, + [CLKID_VCLK_INPUT] = &g12a_vclk_input.hw, + [CLKID_VCLK2_INPUT] = &g12a_vclk2_input.hw, + [CLKID_VCLK_DIV] = &g12a_vclk_div.hw, + [CLKID_VCLK2_DIV] = &g12a_vclk2_div.hw, + [CLKID_VCLK] = &g12a_vclk.hw, + [CLKID_VCLK2] = &g12a_vclk2.hw, + [CLKID_VCLK_DIV1] = &g12a_vclk_div1.hw, + [CLKID_VCLK_DIV2_EN] = &g12a_vclk_div2_en.hw, + [CLKID_VCLK_DIV4_EN] = &g12a_vclk_div4_en.hw, + [CLKID_VCLK_DIV6_EN] = &g12a_vclk_div6_en.hw, + [CLKID_VCLK_DIV12_EN] = &g12a_vclk_div12_en.hw, + [CLKID_VCLK2_DIV1] = &g12a_vclk2_div1.hw, + [CLKID_VCLK2_DIV2_EN] = &g12a_vclk2_div2_en.hw, + [CLKID_VCLK2_DIV4_EN] = &g12a_vclk2_div4_en.hw, + [CLKID_VCLK2_DIV6_EN] = &g12a_vclk2_div6_en.hw, + [CLKID_VCLK2_DIV12_EN] = &g12a_vclk2_div12_en.hw, + [CLKID_VCLK_DIV2] = &g12a_vclk_div2.hw, + [CLKID_VCLK_DIV4] = &g12a_vclk_div4.hw, + [CLKID_VCLK_DIV6] = &g12a_vclk_div6.hw, + [CLKID_VCLK_DIV12] = &g12a_vclk_div12.hw, + [CLKID_VCLK2_DIV2] = &g12a_vclk2_div2.hw, + [CLKID_VCLK2_DIV4] = &g12a_vclk2_div4.hw, + [CLKID_VCLK2_DIV6] = &g12a_vclk2_div6.hw, + [CLKID_VCLK2_DIV12] = &g12a_vclk2_div12.hw, + [CLKID_CTS_ENCI_SEL] = &g12a_cts_enci_sel.hw, + [CLKID_CTS_ENCP_SEL] = &g12a_cts_encp_sel.hw, + [CLKID_CTS_VDAC_SEL] = &g12a_cts_vdac_sel.hw, + [CLKID_HDMI_TX_SEL] = &g12a_hdmi_tx_sel.hw, + [CLKID_CTS_ENCI] = &g12a_cts_enci.hw, + [CLKID_CTS_ENCP] = &g12a_cts_encp.hw, + [CLKID_CTS_VDAC] = &g12a_cts_vdac.hw, + [CLKID_HDMI_TX] = &g12a_hdmi_tx.hw, + [CLKID_HDMI_SEL] = &g12a_hdmi_sel.hw, + [CLKID_HDMI_DIV] = &g12a_hdmi_div.hw, + [CLKID_HDMI] = &g12a_hdmi.hw, + [CLKID_MALI_0_SEL] = &g12a_mali_0_sel.hw, + [CLKID_MALI_0_DIV] = &g12a_mali_0_div.hw, + [CLKID_MALI_0] = &g12a_mali_0.hw, + [CLKID_MALI_1_SEL] = &g12a_mali_1_sel.hw, + [CLKID_MALI_1_DIV] = &g12a_mali_1_div.hw, + [CLKID_MALI_1] = &g12a_mali_1.hw, + [CLKID_MALI] = &g12a_mali.hw, + [CLKID_MPLL_50M_DIV] = &g12a_mpll_50m_div.hw, + [CLKID_MPLL_50M] = &g12a_mpll_50m.hw, + [CLKID_SYS_PLL_DIV16_EN] = &g12a_sys_pll_div16_en.hw, + [CLKID_SYS_PLL_DIV16] = &g12a_sys_pll_div16.hw, + [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_premux0.hw, + [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_mux0_div.hw, + [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_postmux0.hw, + [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_premux1.hw, + [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_mux1_div.hw, + [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_postmux1.hw, + [CLKID_CPU_CLK_DYN] = &g12a_cpu_clk_dyn.hw, + [CLKID_CPU_CLK] = &g12a_cpu_clk.hw, + [CLKID_CPU_CLK_DIV16_EN] = &g12a_cpu_clk_div16_en.hw, + [CLKID_CPU_CLK_DIV16] = &g12a_cpu_clk_div16.hw, + [CLKID_CPU_CLK_APB_DIV] = &g12a_cpu_clk_apb_div.hw, + [CLKID_CPU_CLK_APB] = &g12a_cpu_clk_apb.hw, + [CLKID_CPU_CLK_ATB_DIV] = &g12a_cpu_clk_atb_div.hw, + [CLKID_CPU_CLK_ATB] = &g12a_cpu_clk_atb.hw, + [CLKID_CPU_CLK_AXI_DIV] = &g12a_cpu_clk_axi_div.hw, + [CLKID_CPU_CLK_AXI] = &g12a_cpu_clk_axi.hw, + [CLKID_CPU_CLK_TRACE_DIV] = &g12a_cpu_clk_trace_div.hw, + [CLKID_CPU_CLK_TRACE] = &g12a_cpu_clk_trace.hw, + [CLKID_PCIE_PLL_DCO] = &g12a_pcie_pll_dco.hw, + [CLKID_PCIE_PLL_DCO_DIV2] = &g12a_pcie_pll_dco_div2.hw, + [CLKID_PCIE_PLL_OD] = &g12a_pcie_pll_od.hw, + [CLKID_PCIE_PLL] = &g12a_pcie_pll.hw, + [CLKID_VDEC_1_SEL] = &g12a_vdec_1_sel.hw, + [CLKID_VDEC_1_DIV] = &g12a_vdec_1_div.hw, + [CLKID_VDEC_1] = &g12a_vdec_1.hw, + [CLKID_VDEC_HEVC_SEL] = &g12a_vdec_hevc_sel.hw, + 
[CLKID_VDEC_HEVC_DIV] = &g12a_vdec_hevc_div.hw, + [CLKID_VDEC_HEVC] = &g12a_vdec_hevc.hw, + [CLKID_VDEC_HEVCF_SEL] = &g12a_vdec_hevcf_sel.hw, + [CLKID_VDEC_HEVCF_DIV] = &g12a_vdec_hevcf_div.hw, + [CLKID_VDEC_HEVCF] = &g12a_vdec_hevcf.hw, + [CLKID_TS_DIV] = &g12a_ts_div.hw, + [CLKID_TS] = &g12a_ts.hw, + [CLKID_GP1_PLL_DCO] = &sm1_gp1_pll_dco.hw, + [CLKID_GP1_PLL] = &sm1_gp1_pll.hw, + [CLKID_DSU_CLK_DYN0_SEL] = &sm1_dsu_clk_premux0.hw, + [CLKID_DSU_CLK_DYN0_DIV] = &sm1_dsu_clk_premux1.hw, + [CLKID_DSU_CLK_DYN0] = &sm1_dsu_clk_mux0_div.hw, + [CLKID_DSU_CLK_DYN1_SEL] = &sm1_dsu_clk_postmux0.hw, + [CLKID_DSU_CLK_DYN1_DIV] = &sm1_dsu_clk_mux1_div.hw, + [CLKID_DSU_CLK_DYN1] = &sm1_dsu_clk_postmux1.hw, + [CLKID_DSU_CLK_DYN] = &sm1_dsu_clk_dyn.hw, + [CLKID_DSU_CLK_FINAL] = &sm1_dsu_final_clk.hw, + [CLKID_DSU_CLK] = &sm1_dsu_clk.hw, + [CLKID_CPU1_CLK] = &sm1_cpu1_clk.hw, + [CLKID_CPU2_CLK] = &sm1_cpu2_clk.hw, + [CLKID_CPU3_CLK] = &sm1_cpu3_clk.hw, + [NR_CLKS] = NULL, + }, + .num = NR_CLKS, +}; + /* Convenience table to populate regmap in .probe */ static struct clk_regmap *const g12a_clk_regmaps[] = { &g12a_clk81, @@ -4336,6 +4853,20 @@ static struct clk_regmap *const g12a_clk_regmaps[] = { &g12b_cpub_clk_axi, &g12b_cpub_clk_trace_sel, &g12b_cpub_clk_trace, + &sm1_gp1_pll_dco, + &sm1_gp1_pll, + &sm1_dsu_clk_premux0, + &sm1_dsu_clk_premux1, + &sm1_dsu_clk_mux0_div, + &sm1_dsu_clk_postmux0, + &sm1_dsu_clk_mux1_div, + &sm1_dsu_clk_postmux1, + &sm1_dsu_clk_dyn, + &sm1_dsu_final_clk, + &sm1_dsu_clk, + &sm1_cpu1_clk, + &sm1_cpu2_clk, + &sm1_cpu3_clk, }; static const struct reg_sequence g12a_init_regs[] = { @@ -4532,6 +5063,15 @@ static const struct meson_g12a_data g12b_clkc_data = { .dvfs_setup = meson_g12b_dvfs_setup, }; +static const struct meson_g12a_data sm1_clkc_data = { + .eeclkc_data = { + .regmap_clks = g12a_clk_regmaps, + .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps), + .hw_onecell_data = &sm1_hw_onecell_data, + }, + .dvfs_setup = meson_g12a_dvfs_setup, +}; + static const struct of_device_id clkc_match_table[] = { { .compatible = "amlogic,g12a-clkc", @@ -4541,6 +5081,10 @@ static const struct of_device_id clkc_match_table[] = { .compatible = "amlogic,g12b-clkc", .data = &g12b_clkc_data.eeclkc_data }, + { + .compatible = "amlogic,sm1-clkc", + .data = &sm1_clkc_data.eeclkc_data + }, {} }; diff --git a/drivers/clk/meson/g12a.h b/drivers/clk/meson/g12a.h index 559a34cfdfeb87..9df4068aced113 100644 --- a/drivers/clk/meson/g12a.h +++ b/drivers/clk/meson/g12a.h @@ -29,6 +29,14 @@ #define HHI_GP0_PLL_CNTL5 0x054 #define HHI_GP0_PLL_CNTL6 0x058 #define HHI_GP0_PLL_STS 0x05C +#define HHI_GP1_PLL_CNTL0 0x060 +#define HHI_GP1_PLL_CNTL1 0x064 +#define HHI_GP1_PLL_CNTL2 0x068 +#define HHI_GP1_PLL_CNTL3 0x06C +#define HHI_GP1_PLL_CNTL4 0x070 +#define HHI_GP1_PLL_CNTL5 0x074 +#define HHI_GP1_PLL_CNTL6 0x078 +#define HHI_GP1_PLL_STS 0x07C #define HHI_PCIE_PLL_CNTL0 0x098 #define HHI_PCIE_PLL_CNTL1 0x09C #define HHI_PCIE_PLL_CNTL2 0x0A0 @@ -72,6 +80,11 @@ #define HHI_SYS_CPUB_CLK_CNTL1 0x200 #define HHI_SYS_CPUB_CLK_CNTL 0x208 #define HHI_VPU_CLKB_CNTL 0x20C +#define HHI_SYS_CPU_CLK_CNTL2 0x210 +#define HHI_SYS_CPU_CLK_CNTL3 0x214 +#define HHI_SYS_CPU_CLK_CNTL4 0x218 +#define HHI_SYS_CPU_CLK_CNTL5 0x21c +#define HHI_SYS_CPU_CLK_CNTL6 0x220 #define HHI_GEN_CLK_CNTL 0x228 #define HHI_VDIN_MEAS_CLK_CNTL 0x250 #define HHI_MIPIDSI_PHY_CLK_CNTL 0x254 @@ -233,8 +246,17 @@ #define CLKID_CPUB_CLK_AXI 239 #define CLKID_CPUB_CLK_TRACE_SEL 240 #define CLKID_CPUB_CLK_TRACE 241 +#define CLKID_GP1_PLL_DCO 242 +#define 
CLKID_DSU_CLK_DYN0_SEL 244 +#define CLKID_DSU_CLK_DYN0_DIV 245 +#define CLKID_DSU_CLK_DYN0 246 +#define CLKID_DSU_CLK_DYN1_SEL 247 +#define CLKID_DSU_CLK_DYN1_DIV 248 +#define CLKID_DSU_CLK_DYN1 249 +#define CLKID_DSU_CLK_DYN 250 +#define CLKID_DSU_CLK_FINAL 251 -#define NR_CLKS 242 +#define NR_CLKS 256 /* include the CLKIDs that have been made part of the DT binding */ #include diff --git a/drivers/clk/mvebu/Kconfig b/drivers/clk/mvebu/Kconfig index b09f6ded0a30d3..415e6906a113f9 100644 --- a/drivers/clk/mvebu/Kconfig +++ b/drivers/clk/mvebu/Kconfig @@ -8,6 +8,9 @@ config MVEBU_CLK_CPU config MVEBU_CLK_COREDIV bool +config ARMADA_AP_CP_HELPER + bool + config ARMADA_370_CLK bool select MVEBU_CLK_COMMON @@ -35,9 +38,14 @@ config ARMADA_XP_CLK config ARMADA_AP806_SYSCON bool + select ARMADA_AP_CP_HELPER + +config ARMADA_AP_CPU_CLK + bool config ARMADA_CP110_SYSCON bool + select ARMADA_AP_CP_HELPER config DOVE_CLK bool diff --git a/drivers/clk/mvebu/Makefile b/drivers/clk/mvebu/Makefile index 93ac3685271f92..04464cef0f06ff 100644 --- a/drivers/clk/mvebu/Makefile +++ b/drivers/clk/mvebu/Makefile @@ -2,6 +2,7 @@ obj-$(CONFIG_MVEBU_CLK_COMMON) += common.o obj-$(CONFIG_MVEBU_CLK_CPU) += clk-cpu.o obj-$(CONFIG_MVEBU_CLK_COREDIV) += clk-corediv.o +obj-$(CONFIG_ARMADA_AP_CP_HELPER) += armada_ap_cp_helper.o obj-$(CONFIG_ARMADA_370_CLK) += armada-370.o obj-$(CONFIG_ARMADA_375_CLK) += armada-375.o @@ -12,6 +13,7 @@ obj-$(CONFIG_ARMADA_37XX_CLK) += armada-37xx-tbg.o obj-$(CONFIG_ARMADA_37XX_CLK) += armada-37xx-periph.o obj-$(CONFIG_ARMADA_XP_CLK) += armada-xp.o mv98dx3236.o obj-$(CONFIG_ARMADA_AP806_SYSCON) += ap806-system-controller.o +obj-$(CONFIG_ARMADA_AP_CPU_CLK) += ap-cpu-clk.o obj-$(CONFIG_ARMADA_CP110_SYSCON) += cp110-system-controller.o obj-$(CONFIG_DOVE_CLK) += dove.o dove-divider.o obj-$(CONFIG_KIRKWOOD_CLK) += kirkwood.o diff --git a/drivers/clk/mvebu/ap-cpu-clk.c b/drivers/clk/mvebu/ap-cpu-clk.c new file mode 100644 index 00000000000000..af5e5acad3706a --- /dev/null +++ b/drivers/clk/mvebu/ap-cpu-clk.c @@ -0,0 +1,356 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Marvell Armada AP CPU Clock Controller + * + * Copyright (C) 2018 Marvell + * + * Omri Itach + * Gregory Clement + */ + +#define pr_fmt(fmt) "ap-cpu-clk: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include "armada_ap_cp_helper.h" + +#define AP806_CPU_CLUSTER0 0 +#define AP806_CPU_CLUSTER1 1 +#define AP806_CPUS_PER_CLUSTER 2 +#define APN806_CPU1_MASK 0x1 + +#define APN806_CLUSTER_NUM_OFFSET 8 +#define APN806_CLUSTER_NUM_MASK BIT(APN806_CLUSTER_NUM_OFFSET) + +#define APN806_MAX_DIVIDER 32 + +/** + * struct cpu_dfs_regs: CPU DFS register mapping + * @divider_reg: full integer ratio from PLL frequency to CPU clock frequency + * @force_reg: request to force new ratio regardless of relation to other clocks + * @ratio_reg: central request to switch ratios + */ +struct cpu_dfs_regs { + unsigned int divider_reg; + unsigned int force_reg; + unsigned int ratio_reg; + unsigned int ratio_state_reg; + unsigned int divider_mask; + unsigned int cluster_offset; + unsigned int force_mask; + int divider_offset; + int divider_ratio; + int ratio_offset; + int ratio_state_offset; + int ratio_state_cluster_offset; +}; + +/* AP806 CPU DFS register mapping*/ +#define AP806_CA72MP2_0_PLL_CR_0_REG_OFFSET 0x278 +#define AP806_CA72MP2_0_PLL_CR_1_REG_OFFSET 0x280 +#define AP806_CA72MP2_0_PLL_CR_2_REG_OFFSET 0x284 +#define AP806_CA72MP2_0_PLL_SR_REG_OFFSET 0xC94 + +#define AP806_CA72MP2_0_PLL_CR_CLUSTER_OFFSET 0x14 
+#define AP806_PLL_CR_0_CPU_CLK_DIV_RATIO_OFFSET 0 +#define AP806_PLL_CR_CPU_CLK_DIV_RATIO 0 +#define AP806_PLL_CR_0_CPU_CLK_DIV_RATIO_MASK \ + (0x3f << AP806_PLL_CR_0_CPU_CLK_DIV_RATIO_OFFSET) +#define AP806_PLL_CR_0_CPU_CLK_RELOAD_FORCE_OFFSET 24 +#define AP806_PLL_CR_0_CPU_CLK_RELOAD_FORCE_MASK \ + (0x1 << AP806_PLL_CR_0_CPU_CLK_RELOAD_FORCE_OFFSET) +#define AP806_PLL_CR_0_CPU_CLK_RELOAD_RATIO_OFFSET 16 +#define AP806_CA72MP2_0_PLL_RATIO_STABLE_OFFSET 0 +#define AP806_CA72MP2_0_PLL_RATIO_STATE 11 + +#define STATUS_POLL_PERIOD_US 1 +#define STATUS_POLL_TIMEOUT_US 1000000 + +#define to_ap_cpu_clk(_hw) container_of(_hw, struct ap_cpu_clk, hw) + +static const struct cpu_dfs_regs ap806_dfs_regs = { + .divider_reg = AP806_CA72MP2_0_PLL_CR_0_REG_OFFSET, + .force_reg = AP806_CA72MP2_0_PLL_CR_1_REG_OFFSET, + .ratio_reg = AP806_CA72MP2_0_PLL_CR_2_REG_OFFSET, + .ratio_state_reg = AP806_CA72MP2_0_PLL_SR_REG_OFFSET, + .divider_mask = AP806_PLL_CR_0_CPU_CLK_DIV_RATIO_MASK, + .cluster_offset = AP806_CA72MP2_0_PLL_CR_CLUSTER_OFFSET, + .force_mask = AP806_PLL_CR_0_CPU_CLK_RELOAD_FORCE_MASK, + .divider_offset = AP806_PLL_CR_0_CPU_CLK_DIV_RATIO_OFFSET, + .divider_ratio = AP806_PLL_CR_CPU_CLK_DIV_RATIO, + .ratio_offset = AP806_PLL_CR_0_CPU_CLK_RELOAD_RATIO_OFFSET, + .ratio_state_offset = AP806_CA72MP2_0_PLL_RATIO_STABLE_OFFSET, + .ratio_state_cluster_offset = AP806_CA72MP2_0_PLL_RATIO_STABLE_OFFSET, +}; + +/* AP807 CPU DFS register mapping */ +#define AP807_DEVICE_GENERAL_CONTROL_10_REG_OFFSET 0x278 +#define AP807_DEVICE_GENERAL_CONTROL_11_REG_OFFSET 0x27c +#define AP807_DEVICE_GENERAL_STATUS_6_REG_OFFSET 0xc98 +#define AP807_CA72MP2_0_PLL_CR_CLUSTER_OFFSET 0x8 +#define AP807_PLL_CR_0_CPU_CLK_DIV_RATIO_OFFSET 18 +#define AP807_PLL_CR_0_CPU_CLK_DIV_RATIO_MASK \ + (0x3f << AP807_PLL_CR_0_CPU_CLK_DIV_RATIO_OFFSET) +#define AP807_PLL_CR_1_CPU_CLK_DIV_RATIO_OFFSET 12 +#define AP807_PLL_CR_1_CPU_CLK_DIV_RATIO_MASK \ + (0x3f << AP807_PLL_CR_1_CPU_CLK_DIV_RATIO_OFFSET) +#define AP807_PLL_CR_CPU_CLK_DIV_RATIO 3 +#define AP807_PLL_CR_0_CPU_CLK_RELOAD_FORCE_OFFSET 0 +#define AP807_PLL_CR_0_CPU_CLK_RELOAD_FORCE_MASK \ + (0x3 << AP807_PLL_CR_0_CPU_CLK_RELOAD_FORCE_OFFSET) +#define AP807_PLL_CR_0_CPU_CLK_RELOAD_RATIO_OFFSET 6 +#define AP807_CA72MP2_0_PLL_CLKDIV_RATIO_STABLE_OFFSET 20 +#define AP807_CA72MP2_0_PLL_CLKDIV_RATIO_STABLE_CLUSTER_OFFSET 3 + +static const struct cpu_dfs_regs ap807_dfs_regs = { + .divider_reg = AP807_DEVICE_GENERAL_CONTROL_10_REG_OFFSET, + .force_reg = AP807_DEVICE_GENERAL_CONTROL_11_REG_OFFSET, + .ratio_reg = AP807_DEVICE_GENERAL_CONTROL_11_REG_OFFSET, + .ratio_state_reg = AP807_DEVICE_GENERAL_STATUS_6_REG_OFFSET, + .divider_mask = AP807_PLL_CR_0_CPU_CLK_DIV_RATIO_MASK, + .cluster_offset = AP807_CA72MP2_0_PLL_CR_CLUSTER_OFFSET, + .force_mask = AP807_PLL_CR_0_CPU_CLK_RELOAD_FORCE_MASK, + .divider_offset = AP807_PLL_CR_0_CPU_CLK_DIV_RATIO_OFFSET, + .divider_ratio = AP807_PLL_CR_CPU_CLK_DIV_RATIO, + .ratio_offset = AP807_PLL_CR_0_CPU_CLK_RELOAD_RATIO_OFFSET, + .ratio_state_offset = AP807_CA72MP2_0_PLL_CLKDIV_RATIO_STABLE_OFFSET, + .ratio_state_cluster_offset = + AP807_CA72MP2_0_PLL_CLKDIV_RATIO_STABLE_CLUSTER_OFFSET +}; + +/* + * struct ap806_clk: CPU cluster clock controller instance + * @cluster: Cluster clock controller index + * @clk_name: Cluster clock controller name + * @dev : Cluster clock device + * @hw: HW specific structure of Cluster clock controller + * @pll_cr_base: CA72MP2 Register base (Device Sample at Reset register) + */ +struct ap_cpu_clk { + unsigned int cluster; + const char 
*clk_name; + struct device *dev; + struct clk_hw hw; + struct regmap *pll_cr_base; + const struct cpu_dfs_regs *pll_regs; +}; + +static unsigned long ap_cpu_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct ap_cpu_clk *clk = to_ap_cpu_clk(hw); + unsigned int cpu_clkdiv_reg; + int cpu_clkdiv_ratio; + + cpu_clkdiv_reg = clk->pll_regs->divider_reg + + (clk->cluster * clk->pll_regs->cluster_offset); + regmap_read(clk->pll_cr_base, cpu_clkdiv_reg, &cpu_clkdiv_ratio); + cpu_clkdiv_ratio &= clk->pll_regs->divider_mask; + cpu_clkdiv_ratio >>= clk->pll_regs->divider_offset; + + return parent_rate / cpu_clkdiv_ratio; +} + +static int ap_cpu_clk_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct ap_cpu_clk *clk = to_ap_cpu_clk(hw); + int ret, reg, divider = parent_rate / rate; + unsigned int cpu_clkdiv_reg, cpu_force_reg, cpu_ratio_reg, stable_bit; + + cpu_clkdiv_reg = clk->pll_regs->divider_reg + + (clk->cluster * clk->pll_regs->cluster_offset); + cpu_force_reg = clk->pll_regs->force_reg + + (clk->cluster * clk->pll_regs->cluster_offset); + cpu_ratio_reg = clk->pll_regs->ratio_reg + + (clk->cluster * clk->pll_regs->cluster_offset); + + regmap_read(clk->pll_cr_base, cpu_clkdiv_reg, ®); + reg &= ~(clk->pll_regs->divider_mask); + reg |= (divider << clk->pll_regs->divider_offset); + + /* + * AP807 CPU divider has two channels with ratio 1:3 and divider_ratio + * is 1. Otherwise, in the case of the AP806, divider_ratio is 0. + */ + if (clk->pll_regs->divider_ratio) { + reg &= ~(AP807_PLL_CR_1_CPU_CLK_DIV_RATIO_MASK); + reg |= ((divider * clk->pll_regs->divider_ratio) << + AP807_PLL_CR_1_CPU_CLK_DIV_RATIO_OFFSET); + } + regmap_write(clk->pll_cr_base, cpu_clkdiv_reg, reg); + + + regmap_update_bits(clk->pll_cr_base, cpu_force_reg, + clk->pll_regs->force_mask, + clk->pll_regs->force_mask); + + regmap_update_bits(clk->pll_cr_base, cpu_ratio_reg, + BIT(clk->pll_regs->ratio_offset), + BIT(clk->pll_regs->ratio_offset)); + + stable_bit = BIT(clk->pll_regs->ratio_state_offset + + clk->cluster * + clk->pll_regs->ratio_state_cluster_offset), + ret = regmap_read_poll_timeout(clk->pll_cr_base, + clk->pll_regs->ratio_state_reg, reg, + reg & stable_bit, STATUS_POLL_PERIOD_US, + STATUS_POLL_TIMEOUT_US); + if (ret) + return ret; + + regmap_update_bits(clk->pll_cr_base, cpu_ratio_reg, + BIT(clk->pll_regs->ratio_offset), 0); + + return 0; +} + +static long ap_cpu_clk_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + int divider = *parent_rate / rate; + + divider = min(divider, APN806_MAX_DIVIDER); + + return *parent_rate / divider; +} + +static const struct clk_ops ap_cpu_clk_ops = { + .recalc_rate = ap_cpu_clk_recalc_rate, + .round_rate = ap_cpu_clk_round_rate, + .set_rate = ap_cpu_clk_set_rate, +}; + +static int ap_cpu_clock_probe(struct platform_device *pdev) +{ + int ret, nclusters = 0, cluster_index = 0; + struct device *dev = &pdev->dev; + struct device_node *dn, *np = dev->of_node; + struct clk_hw_onecell_data *ap_cpu_data; + struct ap_cpu_clk *ap_cpu_clk; + struct regmap *regmap; + + regmap = syscon_node_to_regmap(np->parent); + if (IS_ERR(regmap)) { + pr_err("cannot get pll_cr_base regmap\n"); + return PTR_ERR(regmap); + } + + /* + * AP806 has 4 cpus and DFS for AP806 is controlled per + * cluster (2 CPUs per cluster), cpu0 and cpu1 are fixed to + * cluster0 while cpu2 and cpu3 are fixed to cluster1 whether + * they are enabled or not. Since cpu0 is the boot cpu, then + * cluster0 must exist. 
If cpu2 or cpu3 is enabled, cluster1 + * will exist and the cluster number is 2; otherwise the + * cluster number is 1. + */ + nclusters = 1; + for_each_of_cpu_node(dn) { + int cpu, err; + + err = of_property_read_u32(dn, "reg", &cpu); + if (WARN_ON(err)) + return err; + + /* If cpu2 or cpu3 is enabled */ + if (cpu & APN806_CLUSTER_NUM_MASK) { + nclusters = 2; + break; + } + } + /* + * DFS for AP806 is controlled per cluster (2 CPUs per cluster), + * so allocate structs per cluster + */ + ap_cpu_clk = devm_kcalloc(dev, nclusters, sizeof(*ap_cpu_clk), + GFP_KERNEL); + if (!ap_cpu_clk) + return -ENOMEM; + + ap_cpu_data = devm_kzalloc(dev, sizeof(*ap_cpu_data) + + sizeof(struct clk_hw *) * nclusters, + GFP_KERNEL); + if (!ap_cpu_data) + return -ENOMEM; + + for_each_of_cpu_node(dn) { + char *clk_name = "cpu-cluster-0"; + struct clk_init_data init; + const char *parent_name; + struct clk *parent; + int cpu, err; + + err = of_property_read_u32(dn, "reg", &cpu); + if (WARN_ON(err)) + return err; + + cluster_index = cpu & APN806_CLUSTER_NUM_MASK; + cluster_index >>= APN806_CLUSTER_NUM_OFFSET; + + /* Initialize once for one cluster */ + if (ap_cpu_data->hws[cluster_index]) + continue; + + parent = of_clk_get(np, cluster_index); + if (IS_ERR(parent)) { + dev_err(dev, "Could not get the clock parent\n"); + return -EINVAL; + } + parent_name = __clk_get_name(parent); + clk_name[12] += cluster_index; + ap_cpu_clk[cluster_index].clk_name = + ap_cp_unique_name(dev, np->parent, clk_name); + ap_cpu_clk[cluster_index].cluster = cluster_index; + ap_cpu_clk[cluster_index].pll_cr_base = regmap; + ap_cpu_clk[cluster_index].hw.init = &init; + ap_cpu_clk[cluster_index].dev = dev; + ap_cpu_clk[cluster_index].pll_regs = of_device_get_match_data(&pdev->dev); + + init.name = ap_cpu_clk[cluster_index].clk_name; + init.ops = &ap_cpu_clk_ops; + init.num_parents = 1; + init.parent_names = &parent_name; + + ret = devm_clk_hw_register(dev, &ap_cpu_clk[cluster_index].hw); + if (ret) + return ret; + ap_cpu_data->hws[cluster_index] = &ap_cpu_clk[cluster_index].hw; + } + + ap_cpu_data->num = cluster_index + 1; + + ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, ap_cpu_data); + if (ret) + dev_err(dev, "failed to register OF clock provider\n"); + + return ret; +} + +static const struct of_device_id ap_cpu_clock_of_match[] = { + { + .compatible = "marvell,ap806-cpu-clock", + .data = &ap806_dfs_regs, + }, + { + .compatible = "marvell,ap807-cpu-clock", + .data = &ap807_dfs_regs, + }, + { } +}; + +static struct platform_driver ap_cpu_clock_driver = { + .probe = ap_cpu_clock_probe, + .driver = { + .name = "marvell-ap-cpu-clock", + .of_match_table = ap_cpu_clock_of_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(ap_cpu_clock_driver); diff --git a/drivers/clk/mvebu/ap806-system-controller.c b/drivers/clk/mvebu/ap806-system-controller.c index ea54a874bbdadd..948bd1e71aea6b 100644 --- a/drivers/clk/mvebu/ap806-system-controller.c +++ b/drivers/clk/mvebu/ap806-system-controller.c @@ -10,18 +10,18 @@ #define pr_fmt(fmt) "ap806-system-controller: " fmt +#include "armada_ap_cp_helper.h" #include #include #include #include -#include #include #include #define AP806_SAR_REG 0x400 #define AP806_SAR_CLKFREQ_MODE_MASK 0x1f -#define AP806_CLK_NUM 5 +#define AP806_CLK_NUM 6 static struct clk *ap806_clks[AP806_CLK_NUM]; @@ -30,86 +30,149 @@ static struct clk_onecell_data ap806_clk_data = { .clk_num = AP806_CLK_NUM, }; -static char *ap806_unique_name(struct device *dev, struct device_node *np, - char *name) +static int 
ap806_get_sar_clocks(unsigned int freq_mode, + unsigned int *cpuclk_freq, + unsigned int *dclk_freq) { - const __be32 *reg; - u64 addr; - - reg = of_get_property(np, "reg", NULL); - addr = of_translate_address(np, reg); - return devm_kasprintf(dev, GFP_KERNEL, "%llx-%s", - (unsigned long long)addr, name); -} - -static int ap806_syscon_common_probe(struct platform_device *pdev, - struct device_node *syscon_node) -{ - unsigned int freq_mode, cpuclk_freq; - const char *name, *fixedclk_name; - struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; - struct regmap *regmap; - u32 reg; - int ret; - - regmap = syscon_node_to_regmap(syscon_node); - if (IS_ERR(regmap)) { - dev_err(dev, "cannot get regmap\n"); - return PTR_ERR(regmap); - } - - ret = regmap_read(regmap, AP806_SAR_REG, ®); - if (ret) { - dev_err(dev, "cannot read from regmap\n"); - return ret; - } - - freq_mode = reg & AP806_SAR_CLKFREQ_MODE_MASK; switch (freq_mode) { case 0x0: + *cpuclk_freq = 2000; + *dclk_freq = 600; + break; case 0x1: - cpuclk_freq = 2000; + *cpuclk_freq = 2000; + *dclk_freq = 525; break; case 0x6: + *cpuclk_freq = 1800; + *dclk_freq = 600; + break; case 0x7: - cpuclk_freq = 1800; + *cpuclk_freq = 1800; + *dclk_freq = 525; break; case 0x4: + *cpuclk_freq = 1600; + *dclk_freq = 400; + break; case 0xB: + *cpuclk_freq = 1600; + *dclk_freq = 450; + break; case 0xD: - cpuclk_freq = 1600; + *cpuclk_freq = 1600; + *dclk_freq = 525; break; case 0x1a: - cpuclk_freq = 1400; + *cpuclk_freq = 1400; + *dclk_freq = 400; break; case 0x14: + *cpuclk_freq = 1300; + *dclk_freq = 400; + break; case 0x17: - cpuclk_freq = 1300; + *cpuclk_freq = 1300; + *dclk_freq = 325; break; case 0x19: - cpuclk_freq = 1200; + *cpuclk_freq = 1200; + *dclk_freq = 400; break; case 0x13: + *cpuclk_freq = 1000; + *dclk_freq = 325; + break; case 0x1d: - cpuclk_freq = 1000; + *cpuclk_freq = 1000; + *dclk_freq = 400; break; case 0x1c: - cpuclk_freq = 800; + *cpuclk_freq = 800; + *dclk_freq = 400; break; case 0x1b: - cpuclk_freq = 600; + *cpuclk_freq = 600; + *dclk_freq = 400; break; default: - dev_err(dev, "invalid SAR value\n"); return -EINVAL; } + return 0; +} + +static int ap807_get_sar_clocks(unsigned int freq_mode, + unsigned int *cpuclk_freq, + unsigned int *dclk_freq) +{ + switch (freq_mode) { + case 0x0: + *cpuclk_freq = 2000; + *dclk_freq = 1200; + break; + case 0x6: + *cpuclk_freq = 2200; + *dclk_freq = 1200; + break; + case 0xD: + *cpuclk_freq = 1600; + *dclk_freq = 1200; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ap806_syscon_common_probe(struct platform_device *pdev, + struct device_node *syscon_node) +{ + unsigned int freq_mode, cpuclk_freq, dclk_freq; + const char *name, *fixedclk_name; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct regmap *regmap; + u32 reg; + int ret; + + regmap = syscon_node_to_regmap(syscon_node); + if (IS_ERR(regmap)) { + dev_err(dev, "cannot get regmap\n"); + return PTR_ERR(regmap); + } + + ret = regmap_read(regmap, AP806_SAR_REG, ®); + if (ret) { + dev_err(dev, "cannot read from regmap\n"); + return ret; + } + + freq_mode = reg & AP806_SAR_CLKFREQ_MODE_MASK; + + if (of_device_is_compatible(pdev->dev.of_node, + "marvell,ap806-clock")) { + ret = ap806_get_sar_clocks(freq_mode, &cpuclk_freq, &dclk_freq); + } else if (of_device_is_compatible(pdev->dev.of_node, + "marvell,ap807-clock")) { + ret = ap807_get_sar_clocks(freq_mode, &cpuclk_freq, &dclk_freq); + } else { + dev_err(dev, "compatible not supported\n"); + return -EINVAL; + } + + if 
(ret) { + dev_err(dev, "invalid Sample at Reset value\n"); + return ret; + } + /* Convert to hertz */ cpuclk_freq *= 1000 * 1000; + dclk_freq *= 1000 * 1000; /* CPU clocks depend on the Sample At Reset configuration */ - name = ap806_unique_name(dev, syscon_node, "cpu-cluster-0"); + name = ap_cp_unique_name(dev, syscon_node, "pll-cluster-0"); ap806_clks[0] = clk_register_fixed_rate(dev, name, NULL, 0, cpuclk_freq); if (IS_ERR(ap806_clks[0])) { @@ -117,7 +180,7 @@ static int ap806_syscon_common_probe(struct platform_device *pdev, goto fail0; } - name = ap806_unique_name(dev, syscon_node, "cpu-cluster-1"); + name = ap_cp_unique_name(dev, syscon_node, "pll-cluster-1"); ap806_clks[1] = clk_register_fixed_rate(dev, name, NULL, 0, cpuclk_freq); if (IS_ERR(ap806_clks[1])) { @@ -126,7 +189,7 @@ static int ap806_syscon_common_probe(struct platform_device *pdev, } /* Fixed clock is always 1200 Mhz */ - fixedclk_name = ap806_unique_name(dev, syscon_node, "fixed"); + fixedclk_name = ap_cp_unique_name(dev, syscon_node, "fixed"); ap806_clks[2] = clk_register_fixed_rate(dev, fixedclk_name, NULL, 0, 1200 * 1000 * 1000); if (IS_ERR(ap806_clks[2])) { @@ -135,7 +198,7 @@ static int ap806_syscon_common_probe(struct platform_device *pdev, } /* MSS Clock is fixed clock divided by 6 */ - name = ap806_unique_name(dev, syscon_node, "mss"); + name = ap_cp_unique_name(dev, syscon_node, "mss"); ap806_clks[3] = clk_register_fixed_factor(NULL, name, fixedclk_name, 0, 1, 6); if (IS_ERR(ap806_clks[3])) { @@ -144,7 +207,7 @@ static int ap806_syscon_common_probe(struct platform_device *pdev, } /* SDIO(/eMMC) Clock is fixed clock divided by 3 */ - name = ap806_unique_name(dev, syscon_node, "sdio"); + name = ap_cp_unique_name(dev, syscon_node, "sdio"); ap806_clks[4] = clk_register_fixed_factor(NULL, name, fixedclk_name, 0, 1, 3); @@ -153,6 +216,14 @@ static int ap806_syscon_common_probe(struct platform_device *pdev, goto fail4; } + /* AP-DCLK(HCLK) Clock is DDR clock divided by 2 */ + name = ap_cp_unique_name(dev, syscon_node, "ap-dclk"); + ap806_clks[5] = clk_register_fixed_rate(dev, name, NULL, 0, dclk_freq); + if (IS_ERR(ap806_clks[5])) { + ret = PTR_ERR(ap806_clks[5]); + goto fail5; + } + ret = of_clk_add_provider(np, of_clk_src_onecell_get, &ap806_clk_data); if (ret) goto fail_clk_add; @@ -160,6 +231,8 @@ static int ap806_syscon_common_probe(struct platform_device *pdev, return 0; fail_clk_add: + clk_unregister_fixed_factor(ap806_clks[5]); +fail5: clk_unregister_fixed_factor(ap806_clks[4]); fail4: clk_unregister_fixed_factor(ap806_clks[3]); @@ -206,6 +279,7 @@ builtin_platform_driver(ap806_syscon_legacy_driver); static const struct of_device_id ap806_clock_of_match[] = { { .compatible = "marvell,ap806-clock", }, + { .compatible = "marvell,ap807-clock", }, { } }; diff --git a/drivers/clk/mvebu/armada_ap_cp_helper.c b/drivers/clk/mvebu/armada_ap_cp_helper.c new file mode 100644 index 00000000000000..6a930f697ee598 --- /dev/null +++ b/drivers/clk/mvebu/armada_ap_cp_helper.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Marvell Armada AP and CP110 helper + * + * Copyright (C) 2018 Marvell + * + * Gregory Clement + * + */ + +#include "armada_ap_cp_helper.h" +#include +#include +#include + +char *ap_cp_unique_name(struct device *dev, struct device_node *np, + const char *name) +{ + const __be32 *reg; + u64 addr; + + /* Do not create a name if there is no clock */ + if (!name) + return NULL; + + reg = of_get_property(np, "reg", NULL); + addr = of_translate_address(np, reg); + return devm_kasprintf(dev, 
GFP_KERNEL, "%llx-%s", + (unsigned long long)addr, name); +} diff --git a/drivers/clk/mvebu/armada_ap_cp_helper.h b/drivers/clk/mvebu/armada_ap_cp_helper.h new file mode 100644 index 00000000000000..810af1e5dfa449 --- /dev/null +++ b/drivers/clk/mvebu/armada_ap_cp_helper.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __ARMADA_AP_CP_HELPER_H +#define __ARMADA_AP_CP_HELPER_H + +struct device; +struct device_node; + +char *ap_cp_unique_name(struct device *dev, struct device_node *np, + const char *name); +#endif diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c index b6de283f45e373..808463276145b0 100644 --- a/drivers/clk/mvebu/cp110-system-controller.c +++ b/drivers/clk/mvebu/cp110-system-controller.c @@ -26,11 +26,11 @@ #define pr_fmt(fmt) "cp110-system-controller: " fmt +#include "armada_ap_cp_helper.h" #include #include #include #include -#include #include #include #include @@ -212,22 +212,6 @@ static struct clk_hw *cp110_of_clk_get(struct of_phandle_args *clkspec, return ERR_PTR(-EINVAL); } -static char *cp110_unique_name(struct device *dev, struct device_node *np, - const char *name) -{ - const __be32 *reg; - u64 addr; - - /* Do not create a name if there is no clock */ - if (!name) - return NULL; - - reg = of_get_property(np, "reg", NULL); - addr = of_translate_address(np, reg); - return devm_kasprintf(dev, GFP_KERNEL, "%llx-%s", - (unsigned long long)addr, name); -} - static int cp110_syscon_common_probe(struct platform_device *pdev, struct device_node *syscon_node) { @@ -261,7 +245,7 @@ static int cp110_syscon_common_probe(struct platform_device *pdev, cp110_clk_data->num = CP110_CLK_NUM; /* Register the PLL0 which is the root of the hw tree */ - pll0_name = cp110_unique_name(dev, syscon_node, "pll0"); + pll0_name = ap_cp_unique_name(dev, syscon_node, "pll0"); hw = clk_hw_register_fixed_rate(NULL, pll0_name, NULL, 0, 1000 * 1000 * 1000); if (IS_ERR(hw)) { @@ -272,7 +256,7 @@ static int cp110_syscon_common_probe(struct platform_device *pdev, cp110_clks[CP110_CORE_PLL0] = hw; /* PPv2 is PLL0/3 */ - ppv2_name = cp110_unique_name(dev, syscon_node, "ppv2-core"); + ppv2_name = ap_cp_unique_name(dev, syscon_node, "ppv2-core"); hw = clk_hw_register_fixed_factor(NULL, ppv2_name, pll0_name, 0, 1, 3); if (IS_ERR(hw)) { ret = PTR_ERR(hw); @@ -282,7 +266,7 @@ static int cp110_syscon_common_probe(struct platform_device *pdev, cp110_clks[CP110_CORE_PPV2] = hw; /* X2CORE clock is PLL0/2 */ - x2core_name = cp110_unique_name(dev, syscon_node, "x2core"); + x2core_name = ap_cp_unique_name(dev, syscon_node, "x2core"); hw = clk_hw_register_fixed_factor(NULL, x2core_name, pll0_name, 0, 1, 2); if (IS_ERR(hw)) { @@ -293,7 +277,7 @@ static int cp110_syscon_common_probe(struct platform_device *pdev, cp110_clks[CP110_CORE_X2CORE] = hw; /* Core clock is X2CORE/2 */ - core_name = cp110_unique_name(dev, syscon_node, "core"); + core_name = ap_cp_unique_name(dev, syscon_node, "core"); hw = clk_hw_register_fixed_factor(NULL, core_name, x2core_name, 0, 1, 2); if (IS_ERR(hw)) { @@ -303,7 +287,7 @@ static int cp110_syscon_common_probe(struct platform_device *pdev, cp110_clks[CP110_CORE_CORE] = hw; /* NAND can be either PLL0/2.5 or core clock */ - nand_name = cp110_unique_name(dev, syscon_node, "nand-core"); + nand_name = ap_cp_unique_name(dev, syscon_node, "nand-core"); if (nand_clk_ctrl & NF_CLOCK_SEL_400_MASK) hw = clk_hw_register_fixed_factor(NULL, nand_name, pll0_name, 0, 2, 5); @@ -318,7 +302,7 @@ static int 
cp110_syscon_common_probe(struct platform_device *pdev, cp110_clks[CP110_CORE_NAND] = hw; /* SDIO clock is PLL0/2.5 */ - sdio_name = cp110_unique_name(dev, syscon_node, "sdio-core"); + sdio_name = ap_cp_unique_name(dev, syscon_node, "sdio-core"); hw = clk_hw_register_fixed_factor(NULL, sdio_name, pll0_name, 0, 2, 5); if (IS_ERR(hw)) { @@ -330,7 +314,7 @@ static int cp110_syscon_common_probe(struct platform_device *pdev, /* create the unique name for all the gate clocks */ for (i = 0; i < ARRAY_SIZE(gate_base_names); i++) - gate_name[i] = cp110_unique_name(dev, syscon_node, + gate_name[i] = ap_cp_unique_name(dev, syscon_node, gate_base_names[i]); for (i = 0; i < ARRAY_SIZE(gate_base_names); i++) { diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index e1ff83cc361eaf..32dbb4f0949209 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -21,7 +21,6 @@ if COMMON_CLK_QCOM config QCOM_A53PLL tristate "MSM8916 A53 PLL" - default ARCH_QCOM help Support for the A53 PLL on MSM8916 devices. It provides the CPU with frequencies above 1GHz. @@ -31,7 +30,6 @@ config QCOM_A53PLL config QCOM_CLK_APCS_MSM8916 tristate "MSM8916 APCS Clock Controller" depends on QCOM_APCS_IPC || COMPILE_TEST - default ARCH_QCOM help Support for the APCS Clock Controller on msm8916 devices. The APCS is managing the mux and divider which feeds the CPUs. @@ -292,6 +290,13 @@ config SDM_LPASSCC_845 Say Y if you want to use the LPASS branch clocks of the LPASS clock controller to reset the LPASS subsystem. +config SM_GCC_8150 + tristate "SM8150 Global Clock Controller" + help + Support for the global clock controller on SM8150 devices. + Say Y if you want to use peripheral devices such as UART, + SPI, I2C, USB, SD/UFS, PCIe etc. + config SPMI_PMIC_CLKDIV tristate "SPMI PMIC clkdiv Support" depends on SPMI || COMPILE_TEST diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index f0768fb1f037e1..4a813b4055d086 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -50,6 +50,7 @@ obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o obj-$(CONFIG_SDM_GPUCC_845) += gpucc-sdm845.o obj-$(CONFIG_SDM_LPASSCC_845) += lpasscc-sdm845.o obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o +obj-$(CONFIG_SM_GCC_8150) += gcc-sm8150.o obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o obj-$(CONFIG_QCOM_HFPLL) += hfpll.o diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index 0ced4a5a9a171e..055318f9799153 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -32,6 +32,7 @@ # define PLL_LOCK_DET BIT(31) #define PLL_L_VAL(p) ((p)->offset + (p)->regs[PLL_OFF_L_VAL]) +#define PLL_CAL_L_VAL(p) ((p)->offset + (p)->regs[PLL_OFF_CAL_L_VAL]) #define PLL_ALPHA_VAL(p) ((p)->offset + (p)->regs[PLL_OFF_ALPHA_VAL]) #define PLL_ALPHA_VAL_U(p) ((p)->offset + (p)->regs[PLL_OFF_ALPHA_VAL_U]) @@ -44,14 +45,17 @@ # define PLL_VCO_MASK 0x3 #define PLL_USER_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL_U]) +#define PLL_USER_CTL_U1(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL_U1]) #define PLL_CONFIG_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL]) #define PLL_CONFIG_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL_U]) +#define PLL_CONFIG_CTL_U1(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL_U1]) #define PLL_TEST_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL]) #define PLL_TEST_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U]) #define PLL_STATUS(p) ((p)->offset + (p)->regs[PLL_OFF_STATUS]) 
#define PLL_OPMODE(p) ((p)->offset + (p)->regs[PLL_OFF_OPMODE]) #define PLL_FRAC(p) ((p)->offset + (p)->regs[PLL_OFF_FRAC]) +#define PLL_CAL_VAL(p) ((p)->offset + (p)->regs[PLL_OFF_CAL_VAL]) const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = { [CLK_ALPHA_PLL_TYPE_DEFAULT] = { @@ -96,6 +100,22 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = { [PLL_OFF_OPMODE] = 0x2c, [PLL_OFF_FRAC] = 0x38, }, + [CLK_ALPHA_PLL_TYPE_TRION] = { + [PLL_OFF_L_VAL] = 0x04, + [PLL_OFF_CAL_L_VAL] = 0x08, + [PLL_OFF_USER_CTL] = 0x0c, + [PLL_OFF_USER_CTL_U] = 0x10, + [PLL_OFF_USER_CTL_U1] = 0x14, + [PLL_OFF_CONFIG_CTL] = 0x18, + [PLL_OFF_CONFIG_CTL_U] = 0x1c, + [PLL_OFF_CONFIG_CTL_U1] = 0x20, + [PLL_OFF_TEST_CTL] = 0x24, + [PLL_OFF_TEST_CTL_U] = 0x28, + [PLL_OFF_STATUS] = 0x30, + [PLL_OFF_OPMODE] = 0x38, + [PLL_OFF_ALPHA_VAL] = 0x40, + [PLL_OFF_CAL_VAL] = 0x44, + }, }; EXPORT_SYMBOL_GPL(clk_alpha_pll_regs); @@ -120,6 +140,10 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs); #define FABIA_PLL_OUT_MASK 0x7 #define FABIA_PLL_RATE_MARGIN 500 +#define TRION_PLL_STANDBY 0x0 +#define TRION_PLL_RUN 0x1 +#define TRION_PLL_OUT_MASK 0x7 + #define pll_alpha_width(p) \ ((PLL_ALPHA_VAL_U(p) - PLL_ALPHA_VAL(p) == 4) ? \ ALPHA_REG_BITWIDTH : ALPHA_REG_16BIT_WIDTH) @@ -730,6 +754,130 @@ static long alpha_pll_huayra_round_rate(struct clk_hw *hw, unsigned long rate, return alpha_huayra_pll_round_rate(rate, *prate, &l, &a); } +static int trion_pll_is_enabled(struct clk_alpha_pll *pll, + struct regmap *regmap) +{ + u32 mode_regval, opmode_regval; + int ret; + + ret = regmap_read(regmap, PLL_MODE(pll), &mode_regval); + ret |= regmap_read(regmap, PLL_OPMODE(pll), &opmode_regval); + if (ret) + return 0; + + return ((opmode_regval & TRION_PLL_RUN) && (mode_regval & PLL_OUTCTRL)); +} + +static int clk_trion_pll_is_enabled(struct clk_hw *hw) +{ + struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); + + return trion_pll_is_enabled(pll, pll->clkr.regmap); +} + +static int clk_trion_pll_enable(struct clk_hw *hw) +{ + struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); + struct regmap *regmap = pll->clkr.regmap; + u32 val; + int ret; + + ret = regmap_read(regmap, PLL_MODE(pll), &val); + if (ret) + return ret; + + /* If in FSM mode, just vote for it */ + if (val & PLL_VOTE_FSM_ENA) { + ret = clk_enable_regmap(hw); + if (ret) + return ret; + return wait_for_pll_enable_active(pll); + } + + /* Set operation mode to RUN */ + regmap_write(regmap, PLL_OPMODE(pll), TRION_PLL_RUN); + + ret = wait_for_pll_enable_lock(pll); + if (ret) + return ret; + + /* Enable the PLL outputs */ + ret = regmap_update_bits(regmap, PLL_USER_CTL(pll), + TRION_PLL_OUT_MASK, TRION_PLL_OUT_MASK); + if (ret) + return ret; + + /* Enable the global PLL outputs */ + return regmap_update_bits(regmap, PLL_MODE(pll), + PLL_OUTCTRL, PLL_OUTCTRL); +} + +static void clk_trion_pll_disable(struct clk_hw *hw) +{ + struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); + struct regmap *regmap = pll->clkr.regmap; + u32 val; + int ret; + + ret = regmap_read(regmap, PLL_MODE(pll), &val); + if (ret) + return; + + /* If in FSM mode, just unvote it */ + if (val & PLL_VOTE_FSM_ENA) { + clk_disable_regmap(hw); + return; + } + + /* Disable the global PLL output */ + ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0); + if (ret) + return; + + /* Disable the PLL outputs */ + ret = regmap_update_bits(regmap, PLL_USER_CTL(pll), + TRION_PLL_OUT_MASK, 0); + if (ret) + return; + + /* Place the PLL mode in STANDBY */ + regmap_write(regmap, PLL_OPMODE(pll), TRION_PLL_STANDBY); + regmap_update_bits(regmap, 
PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N); +} + +static unsigned long +clk_trion_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) +{ + struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); + struct regmap *regmap = pll->clkr.regmap; + u32 l, frac; + u64 prate = parent_rate; + + regmap_read(regmap, PLL_L_VAL(pll), &l); + regmap_read(regmap, PLL_ALPHA_VAL(pll), &frac); + + return alpha_pll_calc_rate(prate, l, frac, ALPHA_REG_16BIT_WIDTH); +} + +static long clk_trion_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); + unsigned long min_freq, max_freq; + u32 l; + u64 a; + + rate = alpha_pll_round_rate(rate, *prate, + &l, &a, ALPHA_REG_16BIT_WIDTH); + if (!pll->vco_table || alpha_pll_find_vco(pll, rate)) + return rate; + + min_freq = pll->vco_table[0].min_freq; + max_freq = pll->vco_table[pll->num_vco - 1].max_freq; + + return clamp(rate, min_freq, max_freq); +} + const struct clk_ops clk_alpha_pll_ops = { .enable = clk_alpha_pll_enable, .disable = clk_alpha_pll_disable, @@ -760,6 +908,15 @@ const struct clk_ops clk_alpha_pll_hwfsm_ops = { }; EXPORT_SYMBOL_GPL(clk_alpha_pll_hwfsm_ops); +const struct clk_ops clk_trion_fixed_pll_ops = { + .enable = clk_trion_pll_enable, + .disable = clk_trion_pll_disable, + .is_enabled = clk_trion_pll_is_enabled, + .recalc_rate = clk_trion_pll_recalc_rate, + .round_rate = clk_trion_pll_round_rate, +}; +EXPORT_SYMBOL_GPL(clk_trion_fixed_pll_ops); + static unsigned long clk_alpha_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { @@ -832,7 +989,7 @@ static int clk_alpha_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate, int div; /* 16 -> 0xf, 8 -> 0x7, 4 -> 0x3, 2 -> 0x1, 1 -> 0x0 */ - div = DIV_ROUND_UP_ULL((u64)parent_rate, rate) - 1; + div = DIV_ROUND_UP_ULL(parent_rate, rate) - 1; return regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll), PLL_POST_DIV_MASK(pll) << PLL_POST_DIV_SHIFT, @@ -1036,11 +1193,6 @@ static unsigned long clk_alpha_pll_postdiv_fabia_recalc_rate(struct clk_hw *hw, u32 i, div = 1, val; int ret; - if (!pll->post_div_table) { - pr_err("Missing the post_div_table for the PLL\n"); - return -EINVAL; - } - ret = regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &val); if (ret) return ret; @@ -1058,16 +1210,71 @@ static unsigned long clk_alpha_pll_postdiv_fabia_recalc_rate(struct clk_hw *hw, return (parent_rate / div); } -static long clk_alpha_pll_postdiv_fabia_round_rate(struct clk_hw *hw, - unsigned long rate, unsigned long *prate) +static unsigned long +clk_trion_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw); + struct regmap *regmap = pll->clkr.regmap; + u32 i, div = 1, val; - if (!pll->post_div_table) { - pr_err("Missing the post_div_table for the PLL\n"); - return -EINVAL; + regmap_read(regmap, PLL_USER_CTL(pll), &val); + + val >>= pll->post_div_shift; + val &= PLL_POST_DIV_MASK(pll); + + for (i = 0; i < pll->num_post_div; i++) { + if (pll->post_div_table[i].val == val) { + div = pll->post_div_table[i].div; + break; + } + } + + return (parent_rate / div); +} + +static long +clk_trion_pll_postdiv_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw); + + return divider_round_rate(hw, rate, prate, pll->post_div_table, + pll->width, CLK_DIVIDER_ROUND_CLOSEST); +}; + +static int +clk_trion_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate, 
+ unsigned long parent_rate) +{ + struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw); + struct regmap *regmap = pll->clkr.regmap; + int i, val = 0, div; + + div = DIV_ROUND_UP_ULL(parent_rate, rate); + for (i = 0; i < pll->num_post_div; i++) { + if (pll->post_div_table[i].div == div) { + val = pll->post_div_table[i].val; + break; + } } + return regmap_update_bits(regmap, PLL_USER_CTL(pll), + PLL_POST_DIV_MASK(pll) << PLL_POST_DIV_SHIFT, + val << PLL_POST_DIV_SHIFT); +} + +const struct clk_ops clk_trion_pll_postdiv_ops = { + .recalc_rate = clk_trion_pll_postdiv_recalc_rate, + .round_rate = clk_trion_pll_postdiv_round_rate, + .set_rate = clk_trion_pll_postdiv_set_rate, +}; +EXPORT_SYMBOL_GPL(clk_trion_pll_postdiv_ops); + +static long clk_alpha_pll_postdiv_fabia_round_rate(struct clk_hw *hw, + unsigned long rate, unsigned long *prate) +{ + struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw); + return divider_round_rate(hw, rate, prate, pll->post_div_table, pll->width, CLK_DIVIDER_ROUND_CLOSEST); } @@ -1089,12 +1296,7 @@ static int clk_alpha_pll_postdiv_fabia_set_rate(struct clk_hw *hw, if (val & PLL_VOTE_FSM_ENA) return 0; - if (!pll->post_div_table) { - pr_err("Missing the post_div_table for the PLL\n"); - return -EINVAL; - } - - div = DIV_ROUND_UP_ULL((u64)parent_rate, rate); + div = DIV_ROUND_UP_ULL(parent_rate, rate); for (i = 0; i < pll->num_post_div; i++) { if (pll->post_div_table[i].div == div) { val = pll->post_div_table[i].val; diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h index 66755f0f84fcb1..15f27f4b06df5e 100644 --- a/drivers/clk/qcom/clk-alpha-pll.h +++ b/drivers/clk/qcom/clk-alpha-pll.h @@ -13,22 +13,27 @@ enum { CLK_ALPHA_PLL_TYPE_HUAYRA, CLK_ALPHA_PLL_TYPE_BRAMMO, CLK_ALPHA_PLL_TYPE_FABIA, + CLK_ALPHA_PLL_TYPE_TRION, CLK_ALPHA_PLL_TYPE_MAX, }; enum { PLL_OFF_L_VAL, + PLL_OFF_CAL_L_VAL, PLL_OFF_ALPHA_VAL, PLL_OFF_ALPHA_VAL_U, PLL_OFF_USER_CTL, PLL_OFF_USER_CTL_U, + PLL_OFF_USER_CTL_U1, PLL_OFF_CONFIG_CTL, PLL_OFF_CONFIG_CTL_U, + PLL_OFF_CONFIG_CTL_U1, PLL_OFF_TEST_CTL, PLL_OFF_TEST_CTL_U, PLL_OFF_STATUS, PLL_OFF_OPMODE, PLL_OFF_FRAC, + PLL_OFF_CAL_VAL, PLL_OFF_MAX_REGS }; @@ -117,5 +122,7 @@ void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, const struct alpha_pll_config *config); void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, const struct alpha_pll_config *config); +extern const struct clk_ops clk_trion_fixed_pll_ops; +extern const struct clk_ops clk_trion_pll_postdiv_ops; #endif diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index 8c02bffe50dfac..b98b81ef43a14a 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -119,7 +119,7 @@ static int update_config(struct clk_rcg2 *rcg) } WARN(1, "%s: rcg didn't update its configuration.", name); - return 0; + return -EBUSY; } static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index) @@ -1105,8 +1105,6 @@ static int clk_rcg2_enable_dfs(const struct clk_rcg_dfs_data *data, rcg->freq_tbl = NULL; - pr_debug("DFS registered for clk %s\n", init->name); - return 0; } @@ -1117,12 +1115,8 @@ int qcom_cc_register_rcg_dfs(struct regmap *regmap, for (i = 0; i < len; i++) { ret = clk_rcg2_enable_dfs(&rcgs[i], regmap); - if (ret) { - const char *name = rcgs[i].init->name; - - pr_err("DFS register failed for clk %s\n", name); + if (ret) return ret; - } } return 0; diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c index a32bfaeb7e613a..96a36f6ff667d6 100644 
--- a/drivers/clk/qcom/clk-rpmh.c +++ b/drivers/clk/qcom/clk-rpmh.c @@ -85,7 +85,10 @@ static DEFINE_MUTEX(rpmh_clk_lock); .hw.init = &(struct clk_init_data){ \ .ops = &clk_rpmh_ops, \ .name = #_name, \ - .parent_names = (const char *[]){ "xo_board" }, \ + .parent_data = &(const struct clk_parent_data){ \ + .fw_name = "xo", \ + .name = "xo_board", \ + }, \ .num_parents = 1, \ }, \ }; \ @@ -100,7 +103,10 @@ static DEFINE_MUTEX(rpmh_clk_lock); .hw.init = &(struct clk_init_data){ \ .ops = &clk_rpmh_ops, \ .name = #_name_active, \ - .parent_names = (const char *[]){ "xo_board" }, \ + .parent_data = &(const struct clk_parent_data){ \ + .fw_name = "xo", \ + .name = "xo_board", \ + }, \ .num_parents = 1, \ }, \ } @@ -358,6 +364,33 @@ static const struct clk_rpmh_desc clk_rpmh_sdm845 = { .num_clks = ARRAY_SIZE(sdm845_rpmh_clocks), }; +DEFINE_CLK_RPMH_ARC(sm8150, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 2); +DEFINE_CLK_RPMH_VRM(sm8150, ln_bb_clk2, ln_bb_clk2_ao, "lnbclka2", 2); +DEFINE_CLK_RPMH_VRM(sm8150, ln_bb_clk3, ln_bb_clk3_ao, "lnbclka3", 2); +DEFINE_CLK_RPMH_VRM(sm8150, rf_clk1, rf_clk1_ao, "rfclka1", 1); +DEFINE_CLK_RPMH_VRM(sm8150, rf_clk2, rf_clk2_ao, "rfclka2", 1); +DEFINE_CLK_RPMH_VRM(sm8150, rf_clk3, rf_clk3_ao, "rfclka3", 1); + +static struct clk_hw *sm8150_rpmh_clocks[] = { + [RPMH_CXO_CLK] = &sm8150_bi_tcxo.hw, + [RPMH_CXO_CLK_A] = &sm8150_bi_tcxo_ao.hw, + [RPMH_LN_BB_CLK2] = &sm8150_ln_bb_clk2.hw, + [RPMH_LN_BB_CLK2_A] = &sm8150_ln_bb_clk2_ao.hw, + [RPMH_LN_BB_CLK3] = &sm8150_ln_bb_clk3.hw, + [RPMH_LN_BB_CLK3_A] = &sm8150_ln_bb_clk3_ao.hw, + [RPMH_RF_CLK1] = &sm8150_rf_clk1.hw, + [RPMH_RF_CLK1_A] = &sm8150_rf_clk1_ao.hw, + [RPMH_RF_CLK2] = &sm8150_rf_clk2.hw, + [RPMH_RF_CLK2_A] = &sm8150_rf_clk2_ao.hw, + [RPMH_RF_CLK3] = &sm8150_rf_clk3.hw, + [RPMH_RF_CLK3_A] = &sm8150_rf_clk3_ao.hw, +}; + +static const struct clk_rpmh_desc clk_rpmh_sm8150 = { + .clks = sm8150_rpmh_clocks, + .num_clks = ARRAY_SIZE(sm8150_rpmh_clocks), +}; + static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec, void *data) { @@ -386,6 +419,7 @@ static int clk_rpmh_probe(struct platform_device *pdev) hw_clks = desc->clks; for (i = 0; i < desc->num_clks; i++) { + const char *name = hw_clks[i]->init->name; u32 res_addr; size_t aux_data_len; const struct bcm_db *data; @@ -416,8 +450,7 @@ static int clk_rpmh_probe(struct platform_device *pdev) ret = devm_clk_hw_register(&pdev->dev, hw_clks[i]); if (ret) { - dev_err(&pdev->dev, "failed to register %s\n", - hw_clks[i]->init->name); + dev_err(&pdev->dev, "failed to register %s\n", name); return ret; } } @@ -437,6 +470,7 @@ static int clk_rpmh_probe(struct platform_device *pdev) static const struct of_device_id clk_rpmh_match_table[] = { { .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845}, + { .compatible = "qcom,sm8150-rpmh-clk", .data = &clk_rpmh_sm8150}, { } }; MODULE_DEVICE_TABLE(of, clk_rpmh_match_table); diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c index a6b2f86112d863..28ddc747d70352 100644 --- a/drivers/clk/qcom/common.c +++ b/drivers/clk/qcom/common.c @@ -306,4 +306,24 @@ int qcom_cc_probe(struct platform_device *pdev, const struct qcom_cc_desc *desc) } EXPORT_SYMBOL_GPL(qcom_cc_probe); +int qcom_cc_probe_by_index(struct platform_device *pdev, int index, + const struct qcom_cc_desc *desc) +{ + struct regmap *regmap; + struct resource *res; + void __iomem *base; + + res = platform_get_resource(pdev, IORESOURCE_MEM, index); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return -ENOMEM; + + 
regmap = devm_regmap_init_mmio(&pdev->dev, base, desc->config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + return qcom_cc_really_probe(pdev, desc, regmap); +} +EXPORT_SYMBOL_GPL(qcom_cc_probe_by_index); + MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h index 1e2a8bdac55a37..bb39a7e106d8a9 100644 --- a/drivers/clk/qcom/common.h +++ b/drivers/clk/qcom/common.h @@ -61,5 +61,7 @@ extern int qcom_cc_really_probe(struct platform_device *pdev, struct regmap *regmap); extern int qcom_cc_probe(struct platform_device *pdev, const struct qcom_cc_desc *desc); +extern int qcom_cc_probe_by_index(struct platform_device *pdev, int index, + const struct qcom_cc_desc *desc); #endif diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c index 39ade58b4ada49..e01f5f591d1e25 100644 --- a/drivers/clk/qcom/gcc-ipq8074.c +++ b/drivers/clk/qcom/gcc-ipq8074.c @@ -1108,7 +1108,7 @@ static struct clk_rcg2 sdcc2_apps_clk_src = { .name = "sdcc2_apps_clk_src", .parent_names = gcc_xo_gpll0_gpll2_gpll0_out_main_div2, .num_parents = 4, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c index 033688264c7b74..091acd59c1d646 100644 --- a/drivers/clk/qcom/gcc-msm8998.c +++ b/drivers/clk/qcom/gcc-msm8998.c @@ -1042,7 +1042,7 @@ static struct clk_rcg2 sdcc2_apps_clk_src = { .name = "sdcc2_apps_clk_src", .parent_names = gcc_parent_names_4, .num_parents = 4, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; @@ -1066,7 +1066,7 @@ static struct clk_rcg2 sdcc4_apps_clk_src = { .name = "sdcc4_apps_clk_src", .parent_names = gcc_parent_names_1, .num_parents = 3, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c index 29cf464dd2c89b..bd32212f37e64b 100644 --- a/drivers/clk/qcom/gcc-qcs404.c +++ b/drivers/clk/qcom/gcc-qcs404.c @@ -1057,7 +1057,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = { .name = "sdcc1_apps_clk_src", .parent_names = gcc_parent_names_13, .num_parents = 5, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; @@ -1103,7 +1103,7 @@ static struct clk_rcg2 sdcc2_apps_clk_src = { .name = "sdcc2_apps_clk_src", .parent_names = gcc_parent_names_14, .num_parents = 4, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; @@ -2604,6 +2604,32 @@ static struct clk_branch gcc_usb_hs_system_clk = { }, }; +static struct clk_branch gcc_wdsp_q6ss_ahbs_clk = { + .halt_reg = 0x1e004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1e004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_wdsp_q6ss_ahbs_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_wdsp_q6ss_axim_clk = { + .halt_reg = 0x1e008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1e008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_wdsp_q6ss_axim_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_hw *gcc_qcs404_hws[] = { &cxo.hw, }; @@ -2749,6 +2775,9 @@ static struct clk_regmap *gcc_qcs404_clocks[] = { [GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr, [GCC_DCC_CLK] = &gcc_dcc_clk.clkr, [GCC_DCC_XO_CLK] = &gcc_dcc_xo_clk.clkr, + [GCC_WCSS_Q6_AHB_CLK] = &gcc_wdsp_q6ss_ahbs_clk.clkr, + [GCC_WCSS_Q6_AXIM_CLK] = &gcc_wdsp_q6ss_axim_clk.clkr, + }; static const struct qcom_reset_map gcc_qcs404_resets[] = { @@ -2774,6 +2803,7 @@ static const struct qcom_reset_map gcc_qcs404_resets[] = { 
[GCC_PCIE_0_SLEEP_ARES] = { 0x3e040, 1 }, [GCC_PCIE_0_PIPE_ARES] = { 0x3e040, 0 }, [GCC_EMAC_BCR] = { 0x4e000 }, + [GCC_WDSP_RESTART] = {0x19000}, }; static const struct regmap_config gcc_qcs404_regmap_config = { diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c index 8827db23066f59..bf5730832ef3de 100644 --- a/drivers/clk/qcom/gcc-sdm660.c +++ b/drivers/clk/qcom/gcc-sdm660.c @@ -787,7 +787,7 @@ static struct clk_rcg2 sdcc2_apps_clk_src = { .name = "sdcc2_apps_clk_src", .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div_gpll4, .num_parents = 4, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c index 7131dcf9b0603a..95be125c3bddf1 100644 --- a/drivers/clk/qcom/gcc-sdm845.c +++ b/drivers/clk/qcom/gcc-sdm845.c @@ -685,7 +685,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = { .name = "gcc_sdcc2_apps_clk_src", .parent_names = gcc_parent_names_10, .num_parents = 5, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; @@ -709,7 +709,7 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = { .name = "gcc_sdcc4_apps_clk_src", .parent_names = gcc_parent_names_0, .num_parents = 4, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c new file mode 100644 index 00000000000000..20877214acffdd --- /dev/null +++ b/drivers/clk/qcom/gcc-sm8150.c @@ -0,0 +1,3588 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "common.h" +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-pll.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "reset.h" + +enum { + P_BI_TCXO, + P_AUD_REF_CLK, + P_CORE_BI_PLL_TEST_SE, + P_GPLL0_OUT_EVEN, + P_GPLL0_OUT_MAIN, + P_GPLL7_OUT_MAIN, + P_GPLL9_OUT_MAIN, + P_SLEEP_CLK, +}; + +static const struct pll_vco trion_vco[] = { + { 249600000, 2000000000, 0 }, +}; + +static struct clk_alpha_pll gpll0 = { + .offset = 0x0, + .vco_table = trion_vco, + .num_vco = ARRAY_SIZE(trion_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION], + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpll0", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + .name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_trion_fixed_pll_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_trion_even[] = { + { 0x0, 1 }, + { 0x1, 2 }, + { 0x3, 4 }, + { 0x7, 8 }, + { } +}; + +static struct clk_alpha_pll_postdiv gpll0_out_even = { + .offset = 0x0, + .post_div_shift = 8, + .post_div_table = post_div_table_trion_even, + .num_post_div = ARRAY_SIZE(post_div_table_trion_even), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION], + .width = 4, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpll0_out_even", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + .name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_trion_pll_postdiv_ops, + }, +}; + +static struct clk_alpha_pll gpll7 = { + .offset = 0x1a000, + .vco_table = trion_vco, + .num_vco = ARRAY_SIZE(trion_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION], + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(7), + .hw.init = &(struct clk_init_data){ + .name = "gpll7", + .parent_data = &(const struct 
clk_parent_data){ + .fw_name = "bi_tcxo", + .name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_trion_fixed_pll_ops, + }, + }, +}; + +static struct clk_alpha_pll gpll9 = { + .offset = 0x1c000, + .vco_table = trion_vco, + .num_vco = ARRAY_SIZE(trion_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION], + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(9), + .hw.init = &(struct clk_init_data){ + .name = "gpll9", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + .name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_trion_fixed_pll_ops, + }, + }, +}; + +static const struct parent_map gcc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL0_OUT_EVEN, 6 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const struct clk_parent_data gcc_parents_0[] = { + { .fw_name = "bi_tcxo", .name = "bi_tcxo" }, + { .hw = &gpll0.clkr.hw }, + { .hw = &gpll0_out_even.clkr.hw }, + { .fw_name = "core_bi_pll_test_se" }, +}; + +static const struct parent_map gcc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_SLEEP_CLK, 5 }, + { P_GPLL0_OUT_EVEN, 6 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const struct clk_parent_data gcc_parents_1[] = { + { .fw_name = "bi_tcxo", .name = "bi_tcxo" }, + { .hw = &gpll0.clkr.hw }, + { .fw_name = "sleep_clk", .name = "sleep_clk" }, + { .hw = &gpll0_out_even.clkr.hw }, + { .fw_name = "core_bi_pll_test_se" }, +}; + +static const struct parent_map gcc_parent_map_2[] = { + { P_BI_TCXO, 0 }, + { P_SLEEP_CLK, 5 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const struct clk_parent_data gcc_parents_2[] = { + { .fw_name = "bi_tcxo", .name = "bi_tcxo" }, + { .fw_name = "sleep_clk", .name = "sleep_clk" }, + { .fw_name = "core_bi_pll_test_se" }, +}; + +static const struct parent_map gcc_parent_map_3[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const struct clk_parent_data gcc_parents_3[] = { + { .fw_name = "bi_tcxo", .name = "bi_tcxo" }, + { .hw = &gpll0.clkr.hw }, + { .fw_name = "core_bi_pll_test_se"}, +}; + +static const struct parent_map gcc_parent_map_4[] = { + { P_BI_TCXO, 0 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const struct clk_parent_data gcc_parents_4[] = { + { .fw_name = "bi_tcxo", .name = "bi_tcxo" }, + { .fw_name = "core_bi_pll_test_se" }, +}; + +static const struct parent_map gcc_parent_map_5[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL7_OUT_MAIN, 3 }, + { P_GPLL0_OUT_EVEN, 6 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const struct clk_parent_data gcc_parents_5[] = { + { .fw_name = "bi_tcxo", .name = "bi_tcxo" }, + { .hw = &gpll0.clkr.hw }, + { .hw = &gpll7.clkr.hw }, + { .hw = &gpll0_out_even.clkr.hw }, + { .fw_name = "core_bi_pll_test_se" }, +}; + +static const struct parent_map gcc_parent_map_6[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL9_OUT_MAIN, 2 }, + { P_GPLL0_OUT_EVEN, 6 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const struct clk_parent_data gcc_parents_6[] = { + { .fw_name = "bi_tcxo", .name = "bi_tcxo" }, + { .hw = &gpll0.clkr.hw }, + { .hw = &gpll9.clkr.hw }, + { .hw = &gpll0_out_even.clkr.hw }, + { .fw_name = "core_bi_pll_test_se" }, +}; + +static const struct parent_map gcc_parent_map_7[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_AUD_REF_CLK, 2 }, + { P_GPLL0_OUT_EVEN, 6 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const struct clk_parent_data gcc_parents_7[] = { + { .fw_name = "bi_tcxo", .name = "bi_tcxo" }, + { .hw = &gpll0.clkr.hw 
}, + { .fw_name = "aud_ref_clk", .name = "aud_ref_clk" }, + { .hw = &gpll0_out_even.clkr.hw }, + { .fw_name = "core_bi_pll_test_se" }, +}; + +static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_cpuss_ahb_clk_src = { + .cmd_rcgr = 0x48014, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_cpuss_ahb_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_emac_ptp_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0), + F(125000000, P_GPLL7_OUT_MAIN, 4, 0, 0), + F(250000000, P_GPLL7_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_emac_ptp_clk_src = { + .cmd_rcgr = 0x6038, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_5, + .freq_tbl = ftbl_gcc_emac_ptp_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_emac_ptp_clk_src", + .parent_data = gcc_parents_5, + .num_parents = 5, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_emac_rgmii_clk_src[] = { + F(2500000, P_BI_TCXO, 1, 25, 192), + F(5000000, P_BI_TCXO, 1, 25, 96), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0), + F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0), + F(125000000, P_GPLL7_OUT_MAIN, 4, 0, 0), + F(250000000, P_GPLL7_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_emac_rgmii_clk_src = { + .cmd_rcgr = 0x601c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_5, + .freq_tbl = ftbl_gcc_emac_rgmii_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_emac_rgmii_clk_src", + .parent_data = gcc_parents_5, + .num_parents = 5, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0), + F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0), + F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_gp1_clk_src = { + .cmd_rcgr = 0x64004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_gp1_clk_src", + .parent_data = gcc_parents_1, + .num_parents = 5, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_gp2_clk_src = { + .cmd_rcgr = 0x65004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_gp2_clk_src", + .parent_data = gcc_parents_1, + .num_parents = 5, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_gp3_clk_src = { + .cmd_rcgr = 0x66004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_gp3_clk_src", + .parent_data = gcc_parents_1, + .num_parents = 5, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = { + F(9600000, P_BI_TCXO, 2, 0, 0), + F(19200000, 
P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pcie_0_aux_clk_src = { + .cmd_rcgr = 0x6b02c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_aux_clk_src", + .parent_data = gcc_parents_2, + .num_parents = 3, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie_1_aux_clk_src = { + .cmd_rcgr = 0x8d02c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_aux_clk_src", + .parent_data = gcc_parents_2, + .num_parents = 3, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_pcie_phy_refgen_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pcie_phy_refgen_clk_src = { + .cmd_rcgr = 0x6f014, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pcie_phy_refgen_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_phy_refgen_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = { + F(9600000, P_BI_TCXO, 2, 0, 0), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pdm2_clk_src = { + .cmd_rcgr = 0x33010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pdm2_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_pdm2_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_qspi_core_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0), + F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0), + F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_qspi_core_clk_src = { + .cmd_rcgr = 0x4b008, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qspi_core_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qspi_core_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = { + F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625), + F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625), + F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75), + F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25), + F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75), + F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15), + F(96000000, P_GPLL0_OUT_EVEN, 1, 8, 25), + F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0), + F(102400000, P_GPLL0_OUT_EVEN, 1, 128, 375), + F(112000000, P_GPLL0_OUT_EVEN, 1, 28, 75), + F(117964800, P_GPLL0_OUT_EVEN, 1, 6144, 15625), + F(120000000, P_GPLL0_OUT_EVEN, 2.5, 0, 0), + F(128000000, P_GPLL0_OUT_MAIN, 1, 16, 75), + { } +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = { + .cmd_rcgr = 0x17148, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = 
"gcc_qupv3_wrap0_s0_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = { + .cmd_rcgr = 0x17278, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s1_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = { + .cmd_rcgr = 0x173a8, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s2_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = { + .cmd_rcgr = 0x174d8, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s3_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = { + .cmd_rcgr = 0x17608, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s4_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = { + .cmd_rcgr = 0x17738, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s5_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = { + .cmd_rcgr = 0x17868, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s6_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = { + .cmd_rcgr = 0x17998, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s7_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = { + .cmd_rcgr = 0x18148, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s0_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = { + .cmd_rcgr = 0x18278, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = 
&(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s1_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = { + .cmd_rcgr = 0x183a8, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s2_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = { + .cmd_rcgr = 0x184d8, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s3_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = { + .cmd_rcgr = 0x18608, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s4_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = { + .cmd_rcgr = 0x18738, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s5_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = { + .cmd_rcgr = 0x1e148, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap2_s0_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = { + .cmd_rcgr = 0x1e278, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap2_s1_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = { + .cmd_rcgr = 0x1e3a8, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap2_s2_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = { + .cmd_rcgr = 0x1e4d8, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap2_s3_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = { + .cmd_rcgr = 0x1e608, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = 
ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap2_s4_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = { + .cmd_rcgr = 0x1e738, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap2_s5_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = { + F(400000, P_BI_TCXO, 12, 1, 4), + F(9600000, P_BI_TCXO, 2, 0, 0), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2), + F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0), + F(202000000, P_GPLL9_OUT_MAIN, 4, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_sdcc2_apps_clk_src = { + .cmd_rcgr = 0x1400c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_6, + .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc2_apps_clk_src", + .parent_data = gcc_parents_6, + .num_parents = 5, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_floor_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = { + F(400000, P_BI_TCXO, 12, 1, 4), + F(9600000, P_BI_TCXO, 2, 0, 0), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2), + F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_sdcc4_apps_clk_src = { + .cmd_rcgr = 0x1600c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc4_apps_clk_src", + .parent_data = gcc_parents_3, + .num_parents = 3, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_floor_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_tsif_ref_clk_src[] = { + F(105495, P_BI_TCXO, 2, 1, 91), + { } +}; + +static struct clk_rcg2 gcc_tsif_ref_clk_src = { + .cmd_rcgr = 0x36010, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_7, + .freq_tbl = ftbl_gcc_tsif_ref_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_tsif_ref_clk_src", + .parent_data = gcc_parents_7, + .num_parents = 5, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_ufs_card_axi_clk_src[] = { + F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0), + F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0), + F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0), + F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_ufs_card_axi_clk_src = { + .cmd_rcgr = 0x75020, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_card_axi_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_axi_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_ufs_card_ice_core_clk_src[] = { + F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0), + F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0), + F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0), + F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_ufs_card_ice_core_clk_src = 
{ + .cmd_rcgr = 0x75060, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_ice_core_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_ufs_card_phy_aux_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_ufs_card_phy_aux_clk_src = { + .cmd_rcgr = 0x75094, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_4, + .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_phy_aux_clk_src", + .parent_data = gcc_parents_4, + .num_parents = 2, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_ufs_card_unipro_core_clk_src[] = { + F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0), + F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0), + F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_ufs_card_unipro_core_clk_src = { + .cmd_rcgr = 0x75078, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_card_unipro_core_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_unipro_core_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = { + F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0), + F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0), + F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0), + F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0), + F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = { + .cmd_rcgr = 0x77020, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_axi_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = { + .cmd_rcgr = 0x77060, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_ice_core_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = { + .cmd_rcgr = 0x77094, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_4, + .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_phy_aux_clk_src", + .parent_data = gcc_parents_4, + .num_parents = 2, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = { + .cmd_rcgr = 0x77078, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_unipro_core_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = { + F(33333333, P_GPLL0_OUT_EVEN, 9, 0, 0), + F(66666667, P_GPLL0_OUT_EVEN, 4.5, 0, 0), + 
F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0), + F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0), + F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_usb30_prim_master_clk_src = { + .cmd_rcgr = 0xf01c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_prim_master_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(20000000, P_GPLL0_OUT_EVEN, 15, 0, 0), + F(60000000, P_GPLL0_OUT_EVEN, 5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = { + .cmd_rcgr = 0xf034, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_prim_mock_utmi_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_usb30_sec_master_clk_src = { + .cmd_rcgr = 0x1001c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_sec_master_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_usb30_sec_mock_utmi_clk_src = { + .cmd_rcgr = 0x10034, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_sec_mock_utmi_clk_src", + .parent_data = gcc_parents_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = { + .cmd_rcgr = 0xf060, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_prim_phy_aux_clk_src", + .parent_data = gcc_parents_2, + .num_parents = 3, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = { + .cmd_rcgr = 0x10060, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_sec_phy_aux_clk_src", + .parent_data = gcc_parents_2, + .num_parents = 3, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_branch gcc_aggre_noc_pcie_tbu_clk = { + .halt_reg = 0x90018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x90018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_aggre_noc_pcie_tbu_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_ufs_card_axi_clk = { + .halt_reg = 0x750c0, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x750c0, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x750c0, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_aggre_ufs_card_axi_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_ufs_card_axi_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + 
+static struct clk_branch gcc_aggre_ufs_card_axi_hw_ctl_clk = { + .halt_reg = 0x750c0, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x750c0, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x750c0, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_aggre_ufs_card_axi_hw_ctl_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_aggre_ufs_card_axi_clk.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch_simple_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_ufs_phy_axi_clk = { + .halt_reg = 0x770c0, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x770c0, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x770c0, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_aggre_ufs_phy_axi_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_ufs_phy_axi_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_ufs_phy_axi_hw_ctl_clk = { + .halt_reg = 0x770c0, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x770c0, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x770c0, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_aggre_ufs_phy_axi_hw_ctl_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_aggre_ufs_phy_axi_clk.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch_simple_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_usb3_prim_axi_clk = { + .halt_reg = 0xf07c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf07c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_aggre_usb3_prim_axi_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_usb30_prim_master_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_usb3_sec_axi_clk = { + .halt_reg = 0x1007c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1007c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_aggre_usb3_sec_axi_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_usb30_sec_master_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_boot_rom_ahb_clk = { + .halt_reg = 0x38004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x38004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(10), + .hw.init = &(struct clk_init_data){ + .name = "gcc_boot_rom_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +/* + * Clock ON depends on external parent 'config noc', so cant poll + * delay and also mark as crtitical for camss boot + */ +static struct clk_branch gcc_camera_ahb_clk = { + .halt_reg = 0xb008, + .halt_check = BRANCH_HALT_DELAY, + .hwcg_reg = 0xb008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xb008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camera_ahb_clk", + .flags = CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camera_hf_axi_clk = { + .halt_reg = 0xb030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb030, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camera_hf_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camera_sf_axi_clk = { + .halt_reg = 0xb034, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb034, + .enable_mask = BIT(0), + .hw.init 
= &(struct clk_init_data){ + .name = "gcc_camera_sf_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +/* XO critical input to camss, so no need to poll */ +static struct clk_branch gcc_camera_xo_clk = { + .halt_reg = 0xb044, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0xb044, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camera_xo_clk", + .flags = CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = { + .halt_reg = 0xf078, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf078, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_cfg_noc_usb3_prim_axi_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_usb30_prim_master_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = { + .halt_reg = 0x10078, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10078, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_cfg_noc_usb3_sec_axi_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_usb30_sec_master_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cpuss_ahb_clk = { + .halt_reg = 0x48000, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(21), + .hw.init = &(struct clk_init_data){ + .name = "gcc_cpuss_ahb_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_cpuss_ahb_clk_src.clkr.hw }, + .num_parents = 1, + /* required for cpuss */ + .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cpuss_dvm_bus_clk = { + .halt_reg = 0x48190, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x48190, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_cpuss_dvm_bus_clk", + /* required for cpuss */ + .flags = CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cpuss_gnoc_clk = { + .halt_reg = 0x48004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x48004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(22), + .hw.init = &(struct clk_init_data){ + .name = "gcc_cpuss_gnoc_clk", + /* required for cpuss */ + .flags = CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cpuss_rbcpr_clk = { + .halt_reg = 0x48008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x48008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_cpuss_rbcpr_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ddrss_gpu_axi_clk = { + .halt_reg = 0x71154, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x71154, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ddrss_gpu_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +/* + * Clock ON depends on external parent 'config noc', so cant poll + * delay and also mark as crtitical for disp boot + */ +static struct clk_branch gcc_disp_ahb_clk = { + .halt_reg = 0xb00c, + .halt_check = BRANCH_HALT_DELAY, + .hwcg_reg = 0xb00c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xb00c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_disp_ahb_clk", + .flags = CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct 
clk_branch gcc_disp_hf_axi_clk = { + .halt_reg = 0xb038, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb038, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_disp_hf_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_disp_sf_axi_clk = { + .halt_reg = 0xb03c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb03c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_disp_sf_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +/* XO critical input to disp, so no need to poll */ +static struct clk_branch gcc_disp_xo_clk = { + .halt_reg = 0xb048, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0xb048, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_disp_xo_clk", + .flags = CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_emac_axi_clk = { + .halt_reg = 0x6010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_emac_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_emac_ptp_clk = { + .halt_reg = 0x6034, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6034, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_emac_ptp_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_emac_ptp_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_emac_rgmii_clk = { + .halt_reg = 0x6018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_emac_rgmii_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_emac_rgmii_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_emac_slv_ahb_clk = { + .halt_reg = 0x6014, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x6014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x6014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_emac_slv_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp1_clk = { + .halt_reg = 0x64000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x64000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp1_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_gp1_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp2_clk = { + .halt_reg = 0x65000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x65000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp2_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_gp2_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp3_clk = { + .halt_reg = 0x66000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x66000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp3_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_gp3_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_cfg_ahb_clk = { + .halt_reg = 0x71004, + .halt_check = BRANCH_HALT, + 
.hwcg_reg = 0x71004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x71004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_cfg_ahb_clk", + /* required for gpu */ + .flags = CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_iref_clk = { + .halt_reg = 0x8c010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_iref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_memnoc_gfx_clk = { + .halt_reg = 0x7100c, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x7100c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_memnoc_gfx_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = { + .halt_reg = 0x71018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x71018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_snoc_dvm_gfx_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_npu_at_clk = { + .halt_reg = 0x4d010, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x4d010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_npu_at_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_npu_axi_clk = { + .halt_reg = 0x4d008, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x4d008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_npu_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_npu_cfg_ahb_clk = { + .halt_reg = 0x4d004, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x4d004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x4d004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_npu_cfg_ahb_clk", + /* required for npu */ + .flags = CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_npu_trig_clk = { + .halt_reg = 0x4d00c, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x4d00c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_npu_trig_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie0_phy_refgen_clk = { + .halt_reg = 0x6f02c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6f02c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie0_phy_refgen_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_pcie_phy_refgen_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie1_phy_refgen_clk = { + .halt_reg = 0x6f030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6f030, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie1_phy_refgen_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_pcie_phy_refgen_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_aux_clk = { + .halt_reg = 0x6b020, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(3), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_aux_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_pcie_0_aux_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, 
+}; + +static struct clk_branch gcc_pcie_0_cfg_ahb_clk = { + .halt_reg = 0x6b01c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x6b01c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(2), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_clkref_clk = { + .halt_reg = 0x8c00c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c00c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_mstr_axi_clk = { + .halt_reg = 0x6b018, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_mstr_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +/* Clock ON depends on external parent 'PIPE' clock, so dont poll */ +static struct clk_branch gcc_pcie_0_pipe_clk = { + .halt_reg = 0x6b024, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_pipe_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_slv_axi_clk = { + .halt_reg = 0x6b014, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x6b014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_slv_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = { + .halt_reg = 0x6b010, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(5), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_slv_q2a_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_aux_clk = { + .halt_reg = 0x8d020, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(29), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_aux_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_pcie_1_aux_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_cfg_ahb_clk = { + .halt_reg = 0x8d01c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x8d01c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(28), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_clkref_clk = { + .halt_reg = 0x8c02c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c02c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_mstr_axi_clk = { + .halt_reg = 0x8d018, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(27), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_mstr_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +/* Clock ON depends on external parent 'PIPE' clock, so dont poll */ +static struct clk_branch gcc_pcie_1_pipe_clk = { + .halt_reg = 0x8d024, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(30), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_pipe_clk", + .ops = &clk_branch2_ops, + 
}, + }, +}; + +static struct clk_branch gcc_pcie_1_slv_axi_clk = { + .halt_reg = 0x8d014, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x8d014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(26), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_slv_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = { + .halt_reg = 0x8d010, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(25), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_slv_q2a_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_phy_aux_clk = { + .halt_reg = 0x6f004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6f004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_phy_aux_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_pcie_0_aux_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm2_clk = { + .halt_reg = 0x3300c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x3300c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pdm2_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_pdm2_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm_ahb_clk = { + .halt_reg = 0x33004, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x33004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x33004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pdm_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm_xo4_clk = { + .halt_reg = 0x33008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x33008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pdm_xo4_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_prng_ahb_clk = { + .halt_reg = 0x34004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(13), + .hw.init = &(struct clk_init_data){ + .name = "gcc_prng_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = { + .halt_reg = 0xb018, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0xb018, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xb018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qmip_camera_nrt_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_camera_rt_ahb_clk = { + .halt_reg = 0xb01c, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0xb01c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xb01c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qmip_camera_rt_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_disp_ahb_clk = { + .halt_reg = 0xb020, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0xb020, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xb020, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qmip_disp_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_video_cvp_ahb_clk = { + .halt_reg = 0xb010, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0xb010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xb010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = 
"gcc_qmip_video_cvp_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = { + .halt_reg = 0xb014, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0xb014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xb014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qmip_video_vcodec_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qspi_cnoc_periph_ahb_clk = { + .halt_reg = 0x4b000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4b000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qspi_cnoc_periph_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qspi_core_clk = { + .halt_reg = 0x4b004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4b004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qspi_core_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_qspi_core_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s0_clk = { + .halt_reg = 0x17144, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(10), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s0_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_qupv3_wrap0_s0_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s1_clk = { + .halt_reg = 0x17274, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(11), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s1_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_qupv3_wrap0_s1_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s2_clk = { + .halt_reg = 0x173a4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(12), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s2_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_qupv3_wrap0_s2_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s3_clk = { + .halt_reg = 0x174d4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(13), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s3_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_qupv3_wrap0_s3_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s4_clk = { + .halt_reg = 0x17604, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(14), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s4_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_qupv3_wrap0_s4_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s5_clk = { + .halt_reg = 0x17734, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(15), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s5_clk", + .parent_hws = (const struct clk_hw *[]){ + 
&gcc_qupv3_wrap0_s5_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s6_clk = { + .halt_reg = 0x17864, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(16), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s6_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_qupv3_wrap0_s6_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s7_clk = { + .halt_reg = 0x17994, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(17), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s7_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_qupv3_wrap0_s7_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s0_clk = { + .halt_reg = 0x18144, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(22), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s0_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_qupv3_wrap1_s0_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s1_clk = { + .halt_reg = 0x18274, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(23), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s1_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_qupv3_wrap1_s1_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s2_clk = { + .halt_reg = 0x183a4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(24), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s2_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_qupv3_wrap1_s2_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s3_clk = { + .halt_reg = 0x184d4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(25), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s3_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_qupv3_wrap1_s3_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s4_clk = { + .halt_reg = 0x18604, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(26), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s4_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_qupv3_wrap1_s4_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s5_clk = { + .halt_reg = 0x18734, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(27), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s5_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_qupv3_wrap1_s5_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + 
+static struct clk_branch gcc_qupv3_wrap2_s0_clk = { + .halt_reg = 0x1e144, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52014, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap2_s0_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_qupv3_wrap2_s0_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap2_s1_clk = { + .halt_reg = 0x1e274, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52014, + .enable_mask = BIT(5), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap2_s1_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_qupv3_wrap2_s1_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap2_s2_clk = { + .halt_reg = 0x1e3a4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52014, + .enable_mask = BIT(6), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap2_s2_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_qupv3_wrap2_s2_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap2_s3_clk = { + .halt_reg = 0x1e4d4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52014, + .enable_mask = BIT(7), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap2_s3_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_qupv3_wrap2_s3_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap2_s4_clk = { + .halt_reg = 0x1e604, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52014, + .enable_mask = BIT(8), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap2_s4_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_qupv3_wrap2_s4_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap2_s5_clk = { + .halt_reg = 0x1e734, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52014, + .enable_mask = BIT(9), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap2_s5_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_qupv3_wrap2_s5_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = { + .halt_reg = 0x17004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(6), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap_0_m_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = { + .halt_reg = 0x17008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x17008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(7), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap_0_s_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = { + .halt_reg = 0x18004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(20), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap_1_m_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = { + 
.halt_reg = 0x18008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x18008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(21), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap_1_s_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_2_m_ahb_clk = { + .halt_reg = 0x1e004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52014, + .enable_mask = BIT(2), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap_2_m_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_2_s_ahb_clk = { + .halt_reg = 0x1e008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x1e008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52014, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap_2_s_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc2_ahb_clk = { + .halt_reg = 0x14008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x14008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc2_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc2_apps_clk = { + .halt_reg = 0x14004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x14004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc2_apps_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_sdcc2_apps_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc4_ahb_clk = { + .halt_reg = 0x16008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x16008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc4_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc4_apps_clk = { + .halt_reg = 0x16004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x16004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc4_apps_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_sdcc4_apps_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = { + .halt_reg = 0x4819c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sys_noc_cpuss_ahb_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_cpuss_ahb_clk_src.clkr.hw }, + .num_parents = 1, + /* required for cpuss */ + .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_tsif_ahb_clk = { + .halt_reg = 0x36004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x36004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_tsif_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_tsif_inactivity_timers_clk = { + .halt_reg = 0x3600c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x3600c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_tsif_inactivity_timers_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_tsif_ref_clk = { + .halt_reg = 0x36008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x36008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_tsif_ref_clk", + 
.parent_hws = (const struct clk_hw *[]){ + &gcc_tsif_ref_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_ahb_clk = { + .halt_reg = 0x75014, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x75014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x75014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_axi_clk = { + .halt_reg = 0x75010, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x75010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x75010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_axi_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_ufs_card_axi_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_axi_hw_ctl_clk = { + .halt_reg = 0x75010, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x75010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x75010, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_axi_hw_ctl_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_ufs_card_axi_clk.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch_simple_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_clkref_clk = { + .halt_reg = 0x8c004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_ice_core_clk = { + .halt_reg = 0x7505c, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x7505c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7505c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_ice_core_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_ufs_card_ice_core_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_ice_core_hw_ctl_clk = { + .halt_reg = 0x7505c, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x7505c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7505c, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_ice_core_hw_ctl_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_ufs_card_ice_core_clk.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch_simple_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_phy_aux_clk = { + .halt_reg = 0x75090, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x75090, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x75090, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_phy_aux_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_ufs_card_phy_aux_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_phy_aux_hw_ctl_clk = { + .halt_reg = 0x75090, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x75090, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x75090, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_phy_aux_hw_ctl_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_ufs_card_phy_aux_clk.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, 
+ .ops = &clk_branch_simple_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_unipro_core_clk = { + .halt_reg = 0x75058, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x75058, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x75058, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_unipro_core_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_ufs_card_unipro_core_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_unipro_core_hw_ctl_clk = { + .halt_reg = 0x75058, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x75058, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x75058, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_unipro_core_hw_ctl_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_ufs_card_unipro_core_clk.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch_simple_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_mem_clkref_clk = { + .halt_reg = 0x8c000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_mem_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_ahb_clk = { + .halt_reg = 0x77014, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x77014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x77014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_axi_clk = { + .halt_reg = 0x77010, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x77010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x77010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_axi_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_ufs_phy_axi_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_axi_hw_ctl_clk = { + .halt_reg = 0x77010, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x77010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x77010, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_axi_hw_ctl_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_ufs_phy_axi_clk.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch_simple_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_ice_core_clk = { + .halt_reg = 0x7705c, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x7705c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7705c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_ice_core_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_ufs_phy_ice_core_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_ice_core_hw_ctl_clk = { + .halt_reg = 0x7705c, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x7705c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7705c, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_ice_core_hw_ctl_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_ufs_phy_ice_core_clk.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch_simple_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_phy_aux_clk = { + .halt_reg 
= 0x77090, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x77090, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x77090, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_phy_aux_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_ufs_phy_phy_aux_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = { + .halt_reg = 0x77090, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x77090, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x77090, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_phy_aux_hw_ctl_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_ufs_phy_phy_aux_clk.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch_simple_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_unipro_core_clk = { + .halt_reg = 0x77058, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x77058, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x77058, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_unipro_core_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_ufs_phy_unipro_core_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_unipro_core_hw_ctl_clk = { + .halt_reg = 0x77058, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x77058, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x77058, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_unipro_core_hw_ctl_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_ufs_phy_unipro_core_clk.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch_simple_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_prim_master_clk = { + .halt_reg = 0xf010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_prim_master_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_usb30_prim_master_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_prim_mock_utmi_clk = { + .halt_reg = 0xf018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_prim_mock_utmi_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_prim_sleep_clk = { + .halt_reg = 0xf014, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_prim_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_sec_master_clk = { + .halt_reg = 0x10010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_sec_master_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_usb30_sec_master_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_sec_mock_utmi_clk = { + .halt_reg = 0x10018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 
0x10018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_sec_mock_utmi_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_usb30_sec_mock_utmi_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_sec_sleep_clk = { + .halt_reg = 0x10014, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_sec_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_prim_clkref_clk = { + .halt_reg = 0x8c008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_prim_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_prim_phy_aux_clk = { + .halt_reg = 0xf050, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf050, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_prim_phy_aux_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_usb3_prim_phy_aux_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = { + .halt_reg = 0xf054, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf054, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_prim_phy_com_aux_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_usb3_prim_phy_aux_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_sec_clkref_clk = { + .halt_reg = 0x8c028, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c028, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_sec_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_sec_phy_aux_clk = { + .halt_reg = 0x10050, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10050, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_sec_phy_aux_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_usb3_sec_phy_aux_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_sec_phy_com_aux_clk = { + .halt_reg = 0x10054, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10054, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_sec_phy_com_aux_clk", + .parent_hws = (const struct clk_hw *[]){ + &gcc_usb3_sec_phy_aux_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +/* + * Clock ON depends on the external parent 'config noc', so we can't poll + * the halt bit and use a delay instead; also mark it as critical for video boot + */ +static struct clk_branch gcc_video_ahb_clk = { + .halt_reg = 0xb004, + .halt_check = BRANCH_HALT_DELAY, + .hwcg_reg = 0xb004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xb004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_video_ahb_clk", + .flags = CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_video_axi0_clk = { + .halt_reg = 0xb024, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb024, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ +
.name = "gcc_video_axi0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_video_axi1_clk = { + .halt_reg = 0xb028, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb028, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_video_axi1_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_video_axic_clk = { + .halt_reg = 0xb02c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb02c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_video_axic_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +/* XO critical input to video, so no need to poll */ +static struct clk_branch gcc_video_xo_clk = { + .halt_reg = 0xb040, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0xb040, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_video_xo_clk", + .flags = CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_regmap *gcc_sm8150_clocks[] = { + [GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr, + [GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr, + [GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK] = + &gcc_aggre_ufs_card_axi_hw_ctl_clk.clkr, + [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr, + [GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK] = + &gcc_aggre_ufs_phy_axi_hw_ctl_clk.clkr, + [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr, + [GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr, + [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr, + [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr, + [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr, + [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr, + [GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr, + [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr, + [GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr, + [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr, + [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr, + [GCC_CPUSS_DVM_BUS_CLK] = &gcc_cpuss_dvm_bus_clk.clkr, + [GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr, + [GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr, + [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr, + [GCC_DISP_AHB_CLK] = &gcc_disp_ahb_clk.clkr, + [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr, + [GCC_DISP_SF_AXI_CLK] = &gcc_disp_sf_axi_clk.clkr, + [GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr, + [GCC_EMAC_AXI_CLK] = &gcc_emac_axi_clk.clkr, + [GCC_EMAC_PTP_CLK] = &gcc_emac_ptp_clk.clkr, + [GCC_EMAC_PTP_CLK_SRC] = &gcc_emac_ptp_clk_src.clkr, + [GCC_EMAC_RGMII_CLK] = &gcc_emac_rgmii_clk.clkr, + [GCC_EMAC_RGMII_CLK_SRC] = &gcc_emac_rgmii_clk_src.clkr, + [GCC_EMAC_SLV_AHB_CLK] = &gcc_emac_slv_ahb_clk.clkr, + [GCC_GP1_CLK] = &gcc_gp1_clk.clkr, + [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr, + [GCC_GP2_CLK] = &gcc_gp2_clk.clkr, + [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr, + [GCC_GP3_CLK] = &gcc_gp3_clk.clkr, + [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr, + [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr, + [GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr, + [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr, + [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr, + [GCC_NPU_AT_CLK] = &gcc_npu_at_clk.clkr, + [GCC_NPU_AXI_CLK] = &gcc_npu_axi_clk.clkr, + [GCC_NPU_CFG_AHB_CLK] = &gcc_npu_cfg_ahb_clk.clkr, + [GCC_NPU_TRIG_CLK] = &gcc_npu_trig_clk.clkr, + [GCC_PCIE0_PHY_REFGEN_CLK] = &gcc_pcie0_phy_refgen_clk.clkr, + [GCC_PCIE1_PHY_REFGEN_CLK] = 
&gcc_pcie1_phy_refgen_clk.clkr, + [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr, + [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr, + [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr, + [GCC_PCIE_0_CLKREF_CLK] = &gcc_pcie_0_clkref_clk.clkr, + [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr, + [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr, + [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr, + [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr, + [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr, + [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr, + [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr, + [GCC_PCIE_1_CLKREF_CLK] = &gcc_pcie_1_clkref_clk.clkr, + [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr, + [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr, + [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr, + [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr, + [GCC_PCIE_PHY_AUX_CLK] = &gcc_pcie_phy_aux_clk.clkr, + [GCC_PCIE_PHY_REFGEN_CLK_SRC] = &gcc_pcie_phy_refgen_clk_src.clkr, + [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr, + [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr, + [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr, + [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr, + [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr, + [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr, + [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr, + [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr, + [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr, + [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr, + [GCC_QSPI_CNOC_PERIPH_AHB_CLK] = &gcc_qspi_cnoc_periph_ahb_clk.clkr, + [GCC_QSPI_CORE_CLK] = &gcc_qspi_core_clk.clkr, + [GCC_QSPI_CORE_CLK_SRC] = &gcc_qspi_core_clk_src.clkr, + [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr, + [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr, + [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr, + [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr, + [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr, + [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr, + [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr, + [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr, + [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr, + [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr, + [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr, + [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr, + [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr, + [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr, + [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr, + [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr, + [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr, + [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr, + [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr, + [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr, + [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr, + [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr, + [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr, + [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr, + [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr, + [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr, + [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr, + [GCC_QUPV3_WRAP1_S5_CLK_SRC] = 
&gcc_qupv3_wrap1_s5_clk_src.clkr, + [GCC_QUPV3_WRAP2_S0_CLK] = &gcc_qupv3_wrap2_s0_clk.clkr, + [GCC_QUPV3_WRAP2_S0_CLK_SRC] = &gcc_qupv3_wrap2_s0_clk_src.clkr, + [GCC_QUPV3_WRAP2_S1_CLK] = &gcc_qupv3_wrap2_s1_clk.clkr, + [GCC_QUPV3_WRAP2_S1_CLK_SRC] = &gcc_qupv3_wrap2_s1_clk_src.clkr, + [GCC_QUPV3_WRAP2_S2_CLK] = &gcc_qupv3_wrap2_s2_clk.clkr, + [GCC_QUPV3_WRAP2_S2_CLK_SRC] = &gcc_qupv3_wrap2_s2_clk_src.clkr, + [GCC_QUPV3_WRAP2_S3_CLK] = &gcc_qupv3_wrap2_s3_clk.clkr, + [GCC_QUPV3_WRAP2_S3_CLK_SRC] = &gcc_qupv3_wrap2_s3_clk_src.clkr, + [GCC_QUPV3_WRAP2_S4_CLK] = &gcc_qupv3_wrap2_s4_clk.clkr, + [GCC_QUPV3_WRAP2_S4_CLK_SRC] = &gcc_qupv3_wrap2_s4_clk_src.clkr, + [GCC_QUPV3_WRAP2_S5_CLK] = &gcc_qupv3_wrap2_s5_clk.clkr, + [GCC_QUPV3_WRAP2_S5_CLK_SRC] = &gcc_qupv3_wrap2_s5_clk_src.clkr, + [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr, + [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr, + [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr, + [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr, + [GCC_QUPV3_WRAP_2_M_AHB_CLK] = &gcc_qupv3_wrap_2_m_ahb_clk.clkr, + [GCC_QUPV3_WRAP_2_S_AHB_CLK] = &gcc_qupv3_wrap_2_s_ahb_clk.clkr, + [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr, + [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr, + [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr, + [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr, + [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr, + [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr, + [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr, + [GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr, + [GCC_TSIF_INACTIVITY_TIMERS_CLK] = &gcc_tsif_inactivity_timers_clk.clkr, + [GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr, + [GCC_TSIF_REF_CLK_SRC] = &gcc_tsif_ref_clk_src.clkr, + [GCC_UFS_CARD_AHB_CLK] = &gcc_ufs_card_ahb_clk.clkr, + [GCC_UFS_CARD_AXI_CLK] = &gcc_ufs_card_axi_clk.clkr, + [GCC_UFS_CARD_AXI_CLK_SRC] = &gcc_ufs_card_axi_clk_src.clkr, + [GCC_UFS_CARD_AXI_HW_CTL_CLK] = &gcc_ufs_card_axi_hw_ctl_clk.clkr, + [GCC_UFS_CARD_CLKREF_CLK] = &gcc_ufs_card_clkref_clk.clkr, + [GCC_UFS_CARD_ICE_CORE_CLK] = &gcc_ufs_card_ice_core_clk.clkr, + [GCC_UFS_CARD_ICE_CORE_CLK_SRC] = &gcc_ufs_card_ice_core_clk_src.clkr, + [GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK] = + &gcc_ufs_card_ice_core_hw_ctl_clk.clkr, + [GCC_UFS_CARD_PHY_AUX_CLK] = &gcc_ufs_card_phy_aux_clk.clkr, + [GCC_UFS_CARD_PHY_AUX_CLK_SRC] = &gcc_ufs_card_phy_aux_clk_src.clkr, + [GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK] = + &gcc_ufs_card_phy_aux_hw_ctl_clk.clkr, + [GCC_UFS_CARD_UNIPRO_CORE_CLK] = &gcc_ufs_card_unipro_core_clk.clkr, + [GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC] = + &gcc_ufs_card_unipro_core_clk_src.clkr, + [GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK] = + &gcc_ufs_card_unipro_core_hw_ctl_clk.clkr, + [GCC_UFS_MEM_CLKREF_CLK] = &gcc_ufs_mem_clkref_clk.clkr, + [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr, + [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr, + [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr, + [GCC_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_ufs_phy_axi_hw_ctl_clk.clkr, + [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr, + [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr, + [GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK] = + &gcc_ufs_phy_ice_core_hw_ctl_clk.clkr, + [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr, + [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr, + [GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_phy_aux_hw_ctl_clk.clkr, + [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr, + 
[GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = + &gcc_ufs_phy_unipro_core_clk_src.clkr, + [GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK] = + &gcc_ufs_phy_unipro_core_hw_ctl_clk.clkr, + [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr, + [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr, + [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr, + [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = + &gcc_usb30_prim_mock_utmi_clk_src.clkr, + [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr, + [GCC_USB30_SEC_MASTER_CLK] = &gcc_usb30_sec_master_clk.clkr, + [GCC_USB30_SEC_MASTER_CLK_SRC] = &gcc_usb30_sec_master_clk_src.clkr, + [GCC_USB30_SEC_MOCK_UTMI_CLK] = &gcc_usb30_sec_mock_utmi_clk.clkr, + [GCC_USB30_SEC_MOCK_UTMI_CLK_SRC] = + &gcc_usb30_sec_mock_utmi_clk_src.clkr, + [GCC_USB30_SEC_SLEEP_CLK] = &gcc_usb30_sec_sleep_clk.clkr, + [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr, + [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr, + [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr, + [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr, + [GCC_USB3_SEC_CLKREF_CLK] = &gcc_usb3_sec_clkref_clk.clkr, + [GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr, + [GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr, + [GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr, + [GCC_VIDEO_AHB_CLK] = &gcc_video_ahb_clk.clkr, + [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr, + [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr, + [GCC_VIDEO_AXIC_CLK] = &gcc_video_axic_clk.clkr, + [GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr, + [GPLL0] = &gpll0.clkr, + [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr, + [GPLL7] = &gpll7.clkr, + [GPLL9] = &gpll9.clkr, +}; + +static const struct qcom_reset_map gcc_sm8150_resets[] = { + [GCC_EMAC_BCR] = { 0x6000 }, + [GCC_GPU_BCR] = { 0x71000 }, + [GCC_MMSS_BCR] = { 0xb000 }, + [GCC_NPU_BCR] = { 0x4d000 }, + [GCC_PCIE_0_BCR] = { 0x6b000 }, + [GCC_PCIE_0_PHY_BCR] = { 0x6c01c }, + [GCC_PCIE_1_BCR] = { 0x8d000 }, + [GCC_PCIE_1_PHY_BCR] = { 0x8e01c }, + [GCC_PCIE_PHY_BCR] = { 0x6f000 }, + [GCC_PDM_BCR] = { 0x33000 }, + [GCC_PRNG_BCR] = { 0x34000 }, + [GCC_QSPI_BCR] = { 0x24008 }, + [GCC_QUPV3_WRAPPER_0_BCR] = { 0x17000 }, + [GCC_QUPV3_WRAPPER_1_BCR] = { 0x18000 }, + [GCC_QUPV3_WRAPPER_2_BCR] = { 0x1e000 }, + [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 }, + [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 }, + [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 }, + [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 }, + [GCC_USB3_PHY_SEC_BCR] = { 0x5000c }, + [GCC_USB3PHY_PHY_SEC_BCR] = { 0x50010 }, + [GCC_SDCC2_BCR] = { 0x14000 }, + [GCC_SDCC4_BCR] = { 0x16000 }, + [GCC_TSIF_BCR] = { 0x36000 }, + [GCC_UFS_CARD_BCR] = { 0x75000 }, + [GCC_UFS_PHY_BCR] = { 0x77000 }, + [GCC_USB30_PRIM_BCR] = { 0xf000 }, + [GCC_USB30_SEC_BCR] = { 0x10000 }, + [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 }, +}; + +static const struct regmap_config gcc_sm8150_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x9c040, + .fast_io = true, +}; + +static const struct qcom_cc_desc gcc_sm8150_desc = { + .config = &gcc_sm8150_regmap_config, + .clks = gcc_sm8150_clocks, + .num_clks = ARRAY_SIZE(gcc_sm8150_clocks), + .resets = gcc_sm8150_resets, + .num_resets = ARRAY_SIZE(gcc_sm8150_resets), +}; + +static const struct of_device_id gcc_sm8150_match_table[] = { + { .compatible = "qcom,gcc-sm8150" }, + { } +}; +MODULE_DEVICE_TABLE(of, gcc_sm8150_match_table); + +static int gcc_sm8150_probe(struct platform_device *pdev) +{ + 
struct regmap *regmap; + + regmap = qcom_cc_map(pdev, &gcc_sm8150_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + /* Disable the GPLL0 active input to NPU and GPU via MISC registers */ + regmap_update_bits(regmap, 0x4d110, 0x3, 0x3); + regmap_update_bits(regmap, 0x71028, 0x3, 0x3); + + return qcom_cc_really_probe(pdev, &gcc_sm8150_desc, regmap); +} + +static struct platform_driver gcc_sm8150_driver = { + .probe = gcc_sm8150_probe, + .driver = { + .name = "gcc-sm8150", + .of_match_table = gcc_sm8150_match_table, + }, +}; + +static int __init gcc_sm8150_init(void) +{ + return platform_driver_register(&gcc_sm8150_driver); +} +subsys_initcall(gcc_sm8150_init); + +static void __exit gcc_sm8150_exit(void) +{ + platform_driver_unregister(&gcc_sm8150_driver); +} +module_exit(gcc_sm8150_exit); + +MODULE_DESCRIPTION("QTI GCC SM8150 Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/qcom/lpasscc-sdm845.c b/drivers/clk/qcom/lpasscc-sdm845.c index e246b99dfbc6d5..56d3e99288921a 100644 --- a/drivers/clk/qcom/lpasscc-sdm845.c +++ b/drivers/clk/qcom/lpasscc-sdm845.c @@ -112,25 +112,6 @@ static const struct qcom_cc_desc lpass_qdsp6ss_sdm845_desc = { .num_clks = ARRAY_SIZE(lpass_qdsp6ss_sdm845_clocks), }; -static int lpass_clocks_sdm845_probe(struct platform_device *pdev, int index, - const struct qcom_cc_desc *desc) -{ - struct regmap *regmap; - struct resource *res; - void __iomem *base; - - res = platform_get_resource(pdev, IORESOURCE_MEM, index); - base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(base)) - return PTR_ERR(base); - - regmap = devm_regmap_init_mmio(&pdev->dev, base, desc->config); - if (IS_ERR(regmap)) - return PTR_ERR(regmap); - - return qcom_cc_really_probe(pdev, desc, regmap); -} - static int lpass_cc_sdm845_probe(struct platform_device *pdev) { const struct qcom_cc_desc *desc; @@ -139,14 +120,14 @@ static int lpass_cc_sdm845_probe(struct platform_device *pdev) lpass_regmap_config.name = "cc"; desc = &lpass_cc_sdm845_desc; - ret = lpass_clocks_sdm845_probe(pdev, 0, desc); + ret = qcom_cc_probe_by_index(pdev, 0, desc); if (ret) return ret; lpass_regmap_config.name = "qdsp6ss"; desc = &lpass_qdsp6ss_sdm845_desc; - return lpass_clocks_sdm845_probe(pdev, 1, desc); + return qcom_cc_probe_by_index(pdev, 1, desc); } static const struct of_device_id lpass_cc_sdm845_match_table[] = { diff --git a/drivers/clk/qcom/turingcc-qcs404.c b/drivers/clk/qcom/turingcc-qcs404.c index aa859e6ec9bd2b..4cfbbf5bf4d90e 100644 --- a/drivers/clk/qcom/turingcc-qcs404.c +++ b/drivers/clk/qcom/turingcc-qcs404.c @@ -96,7 +96,7 @@ static const struct regmap_config turingcc_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, - .max_register = 0x30000, + .max_register = 0x23004, .fast_io = true, }; diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c index 2db9093546c603..e326e6dc09fce6 100644 --- a/drivers/clk/renesas/clk-mstp.c +++ b/drivers/clk/renesas/clk-mstp.c @@ -334,7 +334,8 @@ void __init cpg_mstp_add_clk_domain(struct device_node *np) return; pd->name = np->name; - pd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP; + pd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON | + GENPD_FLAG_ACTIVE_WAKEUP; pd->attach_dev = cpg_mstp_attach_dev; pd->detach_dev = cpg_mstp_detach_dev; pm_genpd_init(pd, &pm_domain_always_on_gov, false); diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c index b33e1383efe3ab..1907ee195a08cf 100644 --- a/drivers/clk/renesas/r9a06g032-clocks.c +++ 
b/drivers/clk/renesas/r9a06g032-clocks.c @@ -421,7 +421,8 @@ static int r9a06g032_add_clk_domain(struct device *dev) return -ENOMEM; pd->name = np->name; - pd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP; + pd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON | + GENPD_FLAG_ACTIVE_WAKEUP; pd->attach_dev = r9a06g032_attach_dev; pd->detach_dev = r9a06g032_detach_dev; pm_genpd_init(pd, &pm_domain_always_on_gov, false); diff --git a/drivers/clk/renesas/rcar-usb2-clock-sel.c b/drivers/clk/renesas/rcar-usb2-clock-sel.c index cc90b11a9c250a..b97f5f9326cfc7 100644 --- a/drivers/clk/renesas/rcar-usb2-clock-sel.c +++ b/drivers/clk/renesas/rcar-usb2-clock-sel.c @@ -117,7 +117,6 @@ static int rcar_usb2_clock_sel_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct usb2_clock_sel_priv *priv; - struct resource *res; struct clk *clk; struct clk_init_data init; @@ -125,8 +124,7 @@ static int rcar_usb2_clock_sel_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(dev, res); + priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c index d4075b13067429..132cc96895e3a9 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.c +++ b/drivers/clk/renesas/renesas-cpg-mssr.c @@ -551,7 +551,8 @@ static int __init cpg_mssr_add_clk_domain(struct device *dev, genpd = &pd->genpd; genpd->name = np->name; - genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP; + genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON | + GENPD_FLAG_ACTIVE_WAKEUP; genpd->attach_dev = cpg_mssr_attach_dev; genpd->detach_dev = cpg_mssr_detach_dev; pm_genpd_init(genpd, &pm_domain_always_on_gov, false); diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile index ff35ab463a6f72..7c5b5813a87cbb 100644 --- a/drivers/clk/rockchip/Makefile +++ b/drivers/clk/rockchip/Makefile @@ -20,6 +20,7 @@ obj-y += clk-rk3128.o obj-y += clk-rk3188.o obj-y += clk-rk3228.o obj-y += clk-rk3288.o +obj-y += clk-rk3308.o obj-y += clk-rk3328.o obj-y += clk-rk3368.o obj-y += clk-rk3399.o diff --git a/drivers/clk/rockchip/clk-rk3308.c b/drivers/clk/rockchip/clk-rk3308.c new file mode 100644 index 00000000000000..b0baf87a283ed3 --- /dev/null +++ b/drivers/clk/rockchip/clk-rk3308.c @@ -0,0 +1,955 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2019 Rockchip Electronics Co. Ltd. 
+ * Author: Finley Xiao + */ + +#include +#include +#include +#include +#include +#include +#include "clk.h" + +#define RK3308_GRF_SOC_STATUS0 0x380 + +enum rk3308_plls { + apll, dpll, vpll0, vpll1, +}; + +static struct rockchip_pll_rate_table rk3308_pll_rates[] = { + /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */ + RK3036_PLL_RATE(1608000000, 1, 67, 1, 1, 1, 0), + RK3036_PLL_RATE(1584000000, 1, 66, 1, 1, 1, 0), + RK3036_PLL_RATE(1560000000, 1, 65, 1, 1, 1, 0), + RK3036_PLL_RATE(1536000000, 1, 64, 1, 1, 1, 0), + RK3036_PLL_RATE(1512000000, 1, 63, 1, 1, 1, 0), + RK3036_PLL_RATE(1488000000, 1, 62, 1, 1, 1, 0), + RK3036_PLL_RATE(1464000000, 1, 61, 1, 1, 1, 0), + RK3036_PLL_RATE(1440000000, 1, 60, 1, 1, 1, 0), + RK3036_PLL_RATE(1416000000, 1, 59, 1, 1, 1, 0), + RK3036_PLL_RATE(1392000000, 1, 58, 1, 1, 1, 0), + RK3036_PLL_RATE(1368000000, 1, 57, 1, 1, 1, 0), + RK3036_PLL_RATE(1344000000, 1, 56, 1, 1, 1, 0), + RK3036_PLL_RATE(1320000000, 1, 55, 1, 1, 1, 0), + RK3036_PLL_RATE(1296000000, 1, 54, 1, 1, 1, 0), + RK3036_PLL_RATE(1272000000, 1, 53, 1, 1, 1, 0), + RK3036_PLL_RATE(1248000000, 1, 52, 1, 1, 1, 0), + RK3036_PLL_RATE(1200000000, 1, 50, 1, 1, 1, 0), + RK3036_PLL_RATE(1188000000, 2, 99, 1, 1, 1, 0), + RK3036_PLL_RATE(1104000000, 1, 46, 1, 1, 1, 0), + RK3036_PLL_RATE(1100000000, 12, 550, 1, 1, 1, 0), + RK3036_PLL_RATE(1008000000, 1, 84, 2, 1, 1, 0), + RK3036_PLL_RATE(1000000000, 6, 500, 2, 1, 1, 0), + RK3036_PLL_RATE(984000000, 1, 82, 2, 1, 1, 0), + RK3036_PLL_RATE(960000000, 1, 80, 2, 1, 1, 0), + RK3036_PLL_RATE(936000000, 1, 78, 2, 1, 1, 0), + RK3036_PLL_RATE(912000000, 1, 76, 2, 1, 1, 0), + RK3036_PLL_RATE(900000000, 4, 300, 2, 1, 1, 0), + RK3036_PLL_RATE(888000000, 1, 74, 2, 1, 1, 0), + RK3036_PLL_RATE(864000000, 1, 72, 2, 1, 1, 0), + RK3036_PLL_RATE(840000000, 1, 70, 2, 1, 1, 0), + RK3036_PLL_RATE(816000000, 1, 68, 2, 1, 1, 0), + RK3036_PLL_RATE(800000000, 6, 400, 2, 1, 1, 0), + RK3036_PLL_RATE(700000000, 6, 350, 2, 1, 1, 0), + RK3036_PLL_RATE(696000000, 1, 58, 2, 1, 1, 0), + RK3036_PLL_RATE(624000000, 1, 52, 2, 1, 1, 0), + RK3036_PLL_RATE(600000000, 1, 75, 3, 1, 1, 0), + RK3036_PLL_RATE(594000000, 2, 99, 2, 1, 1, 0), + RK3036_PLL_RATE(504000000, 1, 63, 3, 1, 1, 0), + RK3036_PLL_RATE(500000000, 6, 250, 2, 1, 1, 0), + RK3036_PLL_RATE(408000000, 1, 68, 2, 2, 1, 0), + RK3036_PLL_RATE(312000000, 1, 52, 2, 2, 1, 0), + RK3036_PLL_RATE(216000000, 1, 72, 4, 2, 1, 0), + RK3036_PLL_RATE(96000000, 1, 64, 4, 4, 1, 0), + { /* sentinel */ }, +}; + +#define RK3308_DIV_ACLKM_MASK 0x7 +#define RK3308_DIV_ACLKM_SHIFT 12 +#define RK3308_DIV_PCLK_DBG_MASK 0xf +#define RK3308_DIV_PCLK_DBG_SHIFT 8 + +#define RK3308_CLKSEL0(_aclk_core, _pclk_dbg) \ +{ \ + .reg = RK3308_CLKSEL_CON(0), \ + .val = HIWORD_UPDATE(_aclk_core, RK3308_DIV_ACLKM_MASK, \ + RK3308_DIV_ACLKM_SHIFT) | \ + HIWORD_UPDATE(_pclk_dbg, RK3308_DIV_PCLK_DBG_MASK, \ + RK3308_DIV_PCLK_DBG_SHIFT), \ +} + +#define RK3308_CPUCLK_RATE(_prate, _aclk_core, _pclk_dbg) \ +{ \ + .prate = _prate, \ + .divs = { \ + RK3308_CLKSEL0(_aclk_core, _pclk_dbg), \ + }, \ +} + +static struct rockchip_cpuclk_rate_table rk3308_cpuclk_rates[] __initdata = { + RK3308_CPUCLK_RATE(1608000000, 1, 7), + RK3308_CPUCLK_RATE(1512000000, 1, 7), + RK3308_CPUCLK_RATE(1488000000, 1, 5), + RK3308_CPUCLK_RATE(1416000000, 1, 5), + RK3308_CPUCLK_RATE(1392000000, 1, 5), + RK3308_CPUCLK_RATE(1296000000, 1, 5), + RK3308_CPUCLK_RATE(1200000000, 1, 5), + RK3308_CPUCLK_RATE(1104000000, 1, 5), + RK3308_CPUCLK_RATE(1008000000, 1, 5), + RK3308_CPUCLK_RATE(912000000, 1, 5), + 
RK3308_CPUCLK_RATE(816000000, 1, 3), + RK3308_CPUCLK_RATE(696000000, 1, 3), + RK3308_CPUCLK_RATE(600000000, 1, 3), + RK3308_CPUCLK_RATE(408000000, 1, 1), + RK3308_CPUCLK_RATE(312000000, 1, 1), + RK3308_CPUCLK_RATE(216000000, 1, 1), + RK3308_CPUCLK_RATE(96000000, 1, 1), +}; + +static const struct rockchip_cpuclk_reg_data rk3308_cpuclk_data = { + .core_reg = RK3308_CLKSEL_CON(0), + .div_core_shift = 0, + .div_core_mask = 0xf, + .mux_core_alt = 1, + .mux_core_main = 0, + .mux_core_shift = 6, + .mux_core_mask = 0x3, +}; + +PNAME(mux_pll_p) = { "xin24m" }; +PNAME(mux_usb480m_p) = { "xin24m", "usb480m_phy", "clk_rtc32k" }; +PNAME(mux_armclk_p) = { "apll_core", "vpll0_core", "vpll1_core" }; +PNAME(mux_dpll_vpll0_p) = { "dpll", "vpll0" }; +PNAME(mux_dpll_vpll0_xin24m_p) = { "dpll", "vpll0", "xin24m" }; +PNAME(mux_dpll_vpll0_vpll1_p) = { "dpll", "vpll0", "vpll1" }; +PNAME(mux_dpll_vpll0_vpll1_xin24m_p) = { "dpll", "vpll0", "vpll1", "xin24m" }; +PNAME(mux_dpll_vpll0_vpll1_usb480m_xin24m_p) = { "dpll", "vpll0", "vpll1", "usb480m", "xin24m" }; +PNAME(mux_vpll0_vpll1_p) = { "vpll0", "vpll1" }; +PNAME(mux_vpll0_vpll1_xin24m_p) = { "vpll0", "vpll1", "xin24m" }; +PNAME(mux_uart0_p) = { "clk_uart0_src", "dummy", "clk_uart0_frac" }; +PNAME(mux_uart1_p) = { "clk_uart1_src", "dummy", "clk_uart1_frac" }; +PNAME(mux_uart2_p) = { "clk_uart2_src", "dummy", "clk_uart2_frac" }; +PNAME(mux_uart3_p) = { "clk_uart3_src", "dummy", "clk_uart3_frac" }; +PNAME(mux_uart4_p) = { "clk_uart4_src", "dummy", "clk_uart4_frac" }; +PNAME(mux_timer_src_p) = { "xin24m", "clk_rtc32k" }; +PNAME(mux_dclk_vop_p) = { "dclk_vop_src", "dclk_vop_frac", "xin24m" }; +PNAME(mux_nandc_p) = { "clk_nandc_div", "clk_nandc_div50" }; +PNAME(mux_sdmmc_p) = { "clk_sdmmc_div", "clk_sdmmc_div50" }; +PNAME(mux_sdio_p) = { "clk_sdio_div", "clk_sdio_div50" }; +PNAME(mux_emmc_p) = { "clk_emmc_div", "clk_emmc_div50" }; +PNAME(mux_mac_p) = { "clk_mac_src", "mac_clkin" }; +PNAME(mux_mac_rmii_sel_p) = { "clk_mac_rx_tx_div20", "clk_mac_rx_tx_div2" }; +PNAME(mux_ddrstdby_p) = { "clk_ddrphy1x_out", "clk_ddr_stdby_div4" }; +PNAME(mux_rtc32k_p) = { "xin32k", "clk_pvtm_32k", "clk_rtc32k_frac", "clk_rtc32k_div" }; +PNAME(mux_usbphy_ref_p) = { "xin24m", "clk_usbphy_ref_src" }; +PNAME(mux_wifi_src_p) = { "clk_wifi_dpll", "clk_wifi_vpll0" }; +PNAME(mux_wifi_p) = { "clk_wifi_osc", "clk_wifi_src" }; +PNAME(mux_pdm_p) = { "clk_pdm_src", "clk_pdm_frac" }; +PNAME(mux_i2s0_8ch_tx_p) = { "clk_i2s0_8ch_tx_src", "clk_i2s0_8ch_tx_frac", "mclk_i2s0_8ch_in" }; +PNAME(mux_i2s0_8ch_tx_rx_p) = { "clk_i2s0_8ch_tx_mux", "clk_i2s0_8ch_rx_mux"}; +PNAME(mux_i2s0_8ch_tx_out_p) = { "clk_i2s0_8ch_tx", "xin12m" }; +PNAME(mux_i2s0_8ch_rx_p) = { "clk_i2s0_8ch_rx_src", "clk_i2s0_8ch_rx_frac", "mclk_i2s0_8ch_in" }; +PNAME(mux_i2s0_8ch_rx_tx_p) = { "clk_i2s0_8ch_rx_mux", "clk_i2s0_8ch_tx_mux"}; +PNAME(mux_i2s1_8ch_tx_p) = { "clk_i2s1_8ch_tx_src", "clk_i2s1_8ch_tx_frac", "mclk_i2s1_8ch_in" }; +PNAME(mux_i2s1_8ch_tx_rx_p) = { "clk_i2s1_8ch_tx_mux", "clk_i2s1_8ch_rx_mux"}; +PNAME(mux_i2s1_8ch_tx_out_p) = { "clk_i2s1_8ch_tx", "xin12m" }; +PNAME(mux_i2s1_8ch_rx_p) = { "clk_i2s1_8ch_rx_src", "clk_i2s1_8ch_rx_frac", "mclk_i2s1_8ch_in" }; +PNAME(mux_i2s1_8ch_rx_tx_p) = { "clk_i2s1_8ch_rx_mux", "clk_i2s1_8ch_tx_mux"}; +PNAME(mux_i2s2_8ch_tx_p) = { "clk_i2s2_8ch_tx_src", "clk_i2s2_8ch_tx_frac", "mclk_i2s2_8ch_in" }; +PNAME(mux_i2s2_8ch_tx_rx_p) = { "clk_i2s2_8ch_tx_mux", "clk_i2s2_8ch_rx_mux"}; +PNAME(mux_i2s2_8ch_tx_out_p) = { "clk_i2s2_8ch_tx", "xin12m" }; +PNAME(mux_i2s2_8ch_rx_p) = { 
"clk_i2s2_8ch_rx_src", "clk_i2s2_8ch_rx_frac", "mclk_i2s2_8ch_in" }; +PNAME(mux_i2s2_8ch_rx_tx_p) = { "clk_i2s2_8ch_rx_mux", "clk_i2s2_8ch_tx_mux"}; +PNAME(mux_i2s3_8ch_tx_p) = { "clk_i2s3_8ch_tx_src", "clk_i2s3_8ch_tx_frac", "mclk_i2s3_8ch_in" }; +PNAME(mux_i2s3_8ch_tx_rx_p) = { "clk_i2s3_8ch_tx_mux", "clk_i2s3_8ch_rx_mux"}; +PNAME(mux_i2s3_8ch_tx_out_p) = { "clk_i2s3_8ch_tx", "xin12m" }; +PNAME(mux_i2s3_8ch_rx_p) = { "clk_i2s3_8ch_rx_src", "clk_i2s3_8ch_rx_frac", "mclk_i2s3_8ch_in" }; +PNAME(mux_i2s3_8ch_rx_tx_p) = { "clk_i2s3_8ch_rx_mux", "clk_i2s3_8ch_tx_mux"}; +PNAME(mux_i2s0_2ch_p) = { "clk_i2s0_2ch_src", "clk_i2s0_2ch_frac", "mclk_i2s0_2ch_in" }; +PNAME(mux_i2s0_2ch_out_p) = { "clk_i2s0_2ch", "xin12m" }; +PNAME(mux_i2s1_2ch_p) = { "clk_i2s1_2ch_src", "clk_i2s1_2ch_frac", "mclk_i2s1_2ch_in"}; +PNAME(mux_i2s1_2ch_out_p) = { "clk_i2s1_2ch", "xin12m" }; +PNAME(mux_spdif_tx_src_p) = { "clk_spdif_tx_div", "clk_spdif_tx_div50" }; +PNAME(mux_spdif_tx_p) = { "clk_spdif_tx_src", "clk_spdif_tx_frac", "mclk_i2s0_2ch_in" }; +PNAME(mux_spdif_rx_src_p) = { "clk_spdif_rx_div", "clk_spdif_rx_div50" }; +PNAME(mux_spdif_rx_p) = { "clk_spdif_rx_src", "clk_spdif_rx_frac" }; + +static struct rockchip_pll_clock rk3308_pll_clks[] __initdata = { + [apll] = PLL(pll_rk3328, PLL_APLL, "apll", mux_pll_p, + 0, RK3308_PLL_CON(0), + RK3308_MODE_CON, 0, 0, 0, rk3308_pll_rates), + [dpll] = PLL(pll_rk3328, PLL_DPLL, "dpll", mux_pll_p, + 0, RK3308_PLL_CON(8), + RK3308_MODE_CON, 2, 1, 0, rk3308_pll_rates), + [vpll0] = PLL(pll_rk3328, PLL_VPLL0, "vpll0", mux_pll_p, + 0, RK3308_PLL_CON(16), + RK3308_MODE_CON, 4, 2, 0, rk3308_pll_rates), + [vpll1] = PLL(pll_rk3328, PLL_VPLL1, "vpll1", mux_pll_p, + 0, RK3308_PLL_CON(24), + RK3308_MODE_CON, 6, 3, 0, rk3308_pll_rates), +}; + +#define MFLAGS CLK_MUX_HIWORD_MASK +#define DFLAGS CLK_DIVIDER_HIWORD_MASK +#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE) + +static struct rockchip_clk_branch rk3308_uart0_fracmux __initdata = + MUX(0, "clk_uart0_mux", mux_uart0_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(11), 14, 2, MFLAGS); + +static struct rockchip_clk_branch rk3308_uart1_fracmux __initdata = + MUX(0, "clk_uart1_mux", mux_uart1_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(14), 14, 2, MFLAGS); + +static struct rockchip_clk_branch rk3308_uart2_fracmux __initdata = + MUX(0, "clk_uart2_mux", mux_uart2_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(17), 14, 2, MFLAGS); + +static struct rockchip_clk_branch rk3308_uart3_fracmux __initdata = + MUX(0, "clk_uart3_mux", mux_uart3_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(20), 14, 2, MFLAGS); + +static struct rockchip_clk_branch rk3308_uart4_fracmux __initdata = + MUX(0, "clk_uart4_mux", mux_uart4_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(23), 14, 2, MFLAGS); + +static struct rockchip_clk_branch rk3308_dclk_vop_fracmux __initdata = + MUX(0, "dclk_vop_mux", mux_dclk_vop_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(8), 14, 2, MFLAGS); + +static struct rockchip_clk_branch rk3308_rtc32k_fracmux __initdata = + MUX(SCLK_RTC32K, "clk_rtc32k", mux_rtc32k_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(2), 8, 2, MFLAGS); + +static struct rockchip_clk_branch rk3308_pdm_fracmux __initdata = + MUX(0, "clk_pdm_mux", mux_pdm_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(46), 15, 1, MFLAGS); + +static struct rockchip_clk_branch rk3308_i2s0_8ch_tx_fracmux __initdata = + MUX(SCLK_I2S0_8CH_TX_MUX, "clk_i2s0_8ch_tx_mux", mux_i2s0_8ch_tx_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(52), 10, 2, MFLAGS); + +static struct rockchip_clk_branch 
rk3308_i2s0_8ch_rx_fracmux __initdata = + MUX(SCLK_I2S0_8CH_RX_MUX, "clk_i2s0_8ch_rx_mux", mux_i2s0_8ch_rx_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(54), 10, 2, MFLAGS); + +static struct rockchip_clk_branch rk3308_i2s1_8ch_tx_fracmux __initdata = + MUX(SCLK_I2S1_8CH_TX_MUX, "clk_i2s1_8ch_tx_mux", mux_i2s1_8ch_tx_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(56), 10, 2, MFLAGS); + +static struct rockchip_clk_branch rk3308_i2s1_8ch_rx_fracmux __initdata = + MUX(SCLK_I2S1_8CH_RX_MUX, "clk_i2s1_8ch_rx_mux", mux_i2s1_8ch_rx_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(58), 10, 2, MFLAGS); + +static struct rockchip_clk_branch rk3308_i2s2_8ch_tx_fracmux __initdata = + MUX(SCLK_I2S2_8CH_TX_MUX, "clk_i2s2_8ch_tx_mux", mux_i2s2_8ch_tx_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(60), 10, 2, MFLAGS); + +static struct rockchip_clk_branch rk3308_i2s2_8ch_rx_fracmux __initdata = + MUX(SCLK_I2S2_8CH_RX_MUX, "clk_i2s2_8ch_rx_mux", mux_i2s2_8ch_rx_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(62), 10, 2, MFLAGS); + +static struct rockchip_clk_branch rk3308_i2s3_8ch_tx_fracmux __initdata = + MUX(SCLK_I2S3_8CH_TX_MUX, "clk_i2s3_8ch_tx_mux", mux_i2s3_8ch_tx_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(64), 10, 2, MFLAGS); + +static struct rockchip_clk_branch rk3308_i2s3_8ch_rx_fracmux __initdata = + MUX(SCLK_I2S3_8CH_RX_MUX, "clk_i2s3_8ch_rx_mux", mux_i2s3_8ch_rx_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(66), 10, 2, MFLAGS); + +static struct rockchip_clk_branch rk3308_i2s0_2ch_fracmux __initdata = + MUX(0, "clk_i2s0_2ch_mux", mux_i2s0_2ch_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(68), 10, 2, MFLAGS); + +static struct rockchip_clk_branch rk3308_i2s1_2ch_fracmux __initdata = + MUX(0, "clk_i2s1_2ch_mux", mux_i2s1_2ch_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(70), 10, 2, MFLAGS); + +static struct rockchip_clk_branch rk3308_spdif_tx_fracmux __initdata = + MUX(0, "clk_spdif_tx_mux", mux_spdif_tx_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(48), 14, 2, MFLAGS); + +static struct rockchip_clk_branch rk3308_spdif_rx_fracmux __initdata = + MUX(0, "clk_spdif_rx_mux", mux_spdif_rx_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(50), 15, 1, MFLAGS); + + +static struct rockchip_clk_branch rk3308_clk_branches[] __initdata = { + /* + * Clock-Architecture Diagram 1 + */ + + MUX(USB480M, "usb480m", mux_usb480m_p, CLK_SET_RATE_PARENT, + RK3308_MODE_CON, 8, 2, MFLAGS), + FACTOR(0, "xin12m", "xin24m", 0, 1, 2), + + /* + * Clock-Architecture Diagram 2 + */ + + GATE(0, "apll_core", "apll", CLK_IGNORE_UNUSED, + RK3308_CLKGATE_CON(0), 0, GFLAGS), + GATE(0, "vpll0_core", "vpll0", CLK_IGNORE_UNUSED, + RK3308_CLKGATE_CON(0), 0, GFLAGS), + GATE(0, "vpll1_core", "vpll1", CLK_IGNORE_UNUSED, + RK3308_CLKGATE_CON(0), 0, GFLAGS), + COMPOSITE_NOMUX(0, "pclk_core_dbg", "armclk", CLK_IGNORE_UNUSED, + RK3308_CLKSEL_CON(0), 8, 4, DFLAGS | CLK_DIVIDER_READ_ONLY, + RK3308_CLKGATE_CON(0), 2, GFLAGS), + COMPOSITE_NOMUX(0, "aclk_core", "armclk", CLK_IGNORE_UNUSED, + RK3308_CLKSEL_CON(0), 12, 3, DFLAGS | CLK_DIVIDER_READ_ONLY, + RK3308_CLKGATE_CON(0), 1, GFLAGS), + + GATE(0, "clk_jtag", "jtag_clkin", CLK_IGNORE_UNUSED, + RK3308_CLKGATE_CON(0), 3, GFLAGS), + + GATE(SCLK_PVTM_CORE, "clk_pvtm_core", "xin24m", 0, + RK3308_CLKGATE_CON(0), 4, GFLAGS), + + /* + * Clock-Architecture Diagram 3 + */ + + COMPOSITE_NODIV(ACLK_BUS_SRC, "clk_bus_src", mux_dpll_vpll0_vpll1_p, CLK_IGNORE_UNUSED, + RK3308_CLKSEL_CON(5), 6, 2, MFLAGS, + RK3308_CLKGATE_CON(1), 0, GFLAGS), + COMPOSITE_NOMUX(PCLK_BUS, "pclk_bus", "clk_bus_src", CLK_IGNORE_UNUSED, + RK3308_CLKSEL_CON(6), 8, 5, 
DFLAGS, + RK3308_CLKGATE_CON(1), 3, GFLAGS), + GATE(PCLK_DDR, "pclk_ddr", "pclk_bus", CLK_IGNORE_UNUSED, + RK3308_CLKGATE_CON(4), 15, GFLAGS), + COMPOSITE_NOMUX(HCLK_BUS, "hclk_bus", "clk_bus_src", CLK_IGNORE_UNUSED, + RK3308_CLKSEL_CON(6), 0, 5, DFLAGS, + RK3308_CLKGATE_CON(1), 2, GFLAGS), + COMPOSITE_NOMUX(ACLK_BUS, "aclk_bus", "clk_bus_src", CLK_IGNORE_UNUSED, + RK3308_CLKSEL_CON(5), 0, 5, DFLAGS, + RK3308_CLKGATE_CON(1), 1, GFLAGS), + + COMPOSITE(0, "clk_uart0_src", mux_dpll_vpll0_vpll1_usb480m_xin24m_p, 0, + RK3308_CLKSEL_CON(10), 13, 3, MFLAGS, 0, 5, DFLAGS, + RK3308_CLKGATE_CON(1), 9, GFLAGS), + COMPOSITE_FRACMUX(0, "clk_uart0_frac", "clk_uart0_src", CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(12), 0, + RK3308_CLKGATE_CON(1), 11, GFLAGS, + &rk3308_uart0_fracmux), + GATE(SCLK_UART0, "clk_uart0", "clk_uart0_mux", 0, + RK3308_CLKGATE_CON(1), 12, GFLAGS), + + COMPOSITE(0, "clk_uart1_src", mux_dpll_vpll0_vpll1_usb480m_xin24m_p, 0, + RK3308_CLKSEL_CON(13), 13, 3, MFLAGS, 0, 5, DFLAGS, + RK3308_CLKGATE_CON(1), 13, GFLAGS), + COMPOSITE_FRACMUX(0, "clk_uart1_frac", "clk_uart1_src", CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(15), 0, + RK3308_CLKGATE_CON(1), 15, GFLAGS, + &rk3308_uart1_fracmux), + GATE(SCLK_UART1, "clk_uart1", "clk_uart1_mux", 0, + RK3308_CLKGATE_CON(2), 0, GFLAGS), + + COMPOSITE(0, "clk_uart2_src", mux_dpll_vpll0_vpll1_usb480m_xin24m_p, 0, + RK3308_CLKSEL_CON(16), 13, 3, MFLAGS, 0, 5, DFLAGS, + RK3308_CLKGATE_CON(2), 1, GFLAGS), + COMPOSITE_FRACMUX(0, "clk_uart2_frac", "clk_uart2_src", CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(18), 0, + RK3308_CLKGATE_CON(2), 3, GFLAGS, + &rk3308_uart2_fracmux), + GATE(SCLK_UART2, "clk_uart2", "clk_uart2_mux", CLK_SET_RATE_PARENT, + RK3308_CLKGATE_CON(2), 4, GFLAGS), + + COMPOSITE(0, "clk_uart3_src", mux_dpll_vpll0_vpll1_usb480m_xin24m_p, 0, + RK3308_CLKSEL_CON(19), 13, 3, MFLAGS, 0, 5, DFLAGS, + RK3308_CLKGATE_CON(2), 5, GFLAGS), + COMPOSITE_FRACMUX(0, "clk_uart3_frac", "clk_uart3_src", CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(21), 0, + RK3308_CLKGATE_CON(2), 7, GFLAGS, + &rk3308_uart3_fracmux), + GATE(SCLK_UART3, "clk_uart3", "clk_uart3_mux", 0, + RK3308_CLKGATE_CON(2), 8, GFLAGS), + + COMPOSITE(0, "clk_uart4_src", mux_dpll_vpll0_vpll1_usb480m_xin24m_p, 0, + RK3308_CLKSEL_CON(22), 13, 3, MFLAGS, 0, 5, DFLAGS, + RK3308_CLKGATE_CON(2), 9, GFLAGS), + COMPOSITE_FRACMUX(0, "clk_uart4_frac", "clk_uart4_src", CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(24), 0, + RK3308_CLKGATE_CON(2), 11, GFLAGS, + &rk3308_uart4_fracmux), + GATE(SCLK_UART4, "clk_uart4", "clk_uart4_mux", 0, + RK3308_CLKGATE_CON(2), 12, GFLAGS), + + COMPOSITE(SCLK_I2C0, "clk_i2c0", mux_dpll_vpll0_xin24m_p, 0, + RK3308_CLKSEL_CON(25), 14, 2, MFLAGS, 0, 7, DFLAGS, + RK3308_CLKGATE_CON(2), 13, GFLAGS), + COMPOSITE(SCLK_I2C1, "clk_i2c1", mux_dpll_vpll0_xin24m_p, 0, + RK3308_CLKSEL_CON(26), 14, 2, MFLAGS, 0, 7, DFLAGS, + RK3308_CLKGATE_CON(2), 14, GFLAGS), + COMPOSITE(SCLK_I2C2, "clk_i2c2", mux_dpll_vpll0_xin24m_p, 0, + RK3308_CLKSEL_CON(27), 14, 2, MFLAGS, 0, 7, DFLAGS, + RK3308_CLKGATE_CON(2), 15, GFLAGS), + COMPOSITE(SCLK_I2C3, "clk_i2c3", mux_dpll_vpll0_xin24m_p, 0, + RK3308_CLKSEL_CON(28), 14, 2, MFLAGS, 0, 7, DFLAGS, + RK3308_CLKGATE_CON(3), 0, GFLAGS), + + COMPOSITE(SCLK_PWM0, "clk_pwm0", mux_dpll_vpll0_xin24m_p, 0, + RK3308_CLKSEL_CON(29), 14, 2, MFLAGS, 0, 7, DFLAGS, + RK3308_CLKGATE_CON(3), 1, GFLAGS), + COMPOSITE(SCLK_PWM1, "clk_pwm1", mux_dpll_vpll0_xin24m_p, 0, + RK3308_CLKSEL_CON(74), 14, 2, MFLAGS, 0, 7, DFLAGS, + RK3308_CLKGATE_CON(15), 0, GFLAGS), + COMPOSITE(SCLK_PWM2, 
"clk_pwm2", mux_dpll_vpll0_xin24m_p, 0, + RK3308_CLKSEL_CON(75), 14, 2, MFLAGS, 0, 7, DFLAGS, + RK3308_CLKGATE_CON(15), 1, GFLAGS), + + COMPOSITE(SCLK_SPI0, "clk_spi0", mux_dpll_vpll0_xin24m_p, 0, + RK3308_CLKSEL_CON(30), 14, 2, MFLAGS, 0, 7, DFLAGS, + RK3308_CLKGATE_CON(3), 2, GFLAGS), + COMPOSITE(SCLK_SPI1, "clk_spi1", mux_dpll_vpll0_xin24m_p, 0, + RK3308_CLKSEL_CON(31), 14, 2, MFLAGS, 0, 7, DFLAGS, + RK3308_CLKGATE_CON(3), 3, GFLAGS), + COMPOSITE(SCLK_SPI2, "clk_spi2", mux_dpll_vpll0_xin24m_p, 0, + RK3308_CLKSEL_CON(32), 14, 2, MFLAGS, 0, 7, DFLAGS, + RK3308_CLKGATE_CON(3), 4, GFLAGS), + + GATE(SCLK_TIMER0, "sclk_timer0", "xin24m", 0, + RK3308_CLKGATE_CON(3), 10, GFLAGS), + GATE(SCLK_TIMER1, "sclk_timer1", "xin24m", 0, + RK3308_CLKGATE_CON(3), 11, GFLAGS), + GATE(SCLK_TIMER2, "sclk_timer2", "xin24m", 0, + RK3308_CLKGATE_CON(3), 12, GFLAGS), + GATE(SCLK_TIMER3, "sclk_timer3", "xin24m", 0, + RK3308_CLKGATE_CON(3), 13, GFLAGS), + GATE(SCLK_TIMER4, "sclk_timer4", "xin24m", 0, + RK3308_CLKGATE_CON(3), 14, GFLAGS), + GATE(SCLK_TIMER5, "sclk_timer5", "xin24m", 0, + RK3308_CLKGATE_CON(3), 15, GFLAGS), + + COMPOSITE_NOMUX(SCLK_TSADC, "clk_tsadc", "xin24m", 0, + RK3308_CLKSEL_CON(33), 0, 11, DFLAGS, + RK3308_CLKGATE_CON(3), 5, GFLAGS), + COMPOSITE_NOMUX(SCLK_SARADC, "clk_saradc", "xin24m", 0, + RK3308_CLKSEL_CON(34), 0, 11, DFLAGS, + RK3308_CLKGATE_CON(3), 6, GFLAGS), + + COMPOSITE_NOMUX(SCLK_OTP, "clk_otp", "xin24m", 0, + RK3308_CLKSEL_CON(35), 0, 4, DFLAGS, + RK3308_CLKGATE_CON(3), 7, GFLAGS), + COMPOSITE_NOMUX(SCLK_OTP_USR, "clk_otp_usr", "clk_otp", 0, + RK3308_CLKSEL_CON(35), 4, 2, DFLAGS, + RK3308_CLKGATE_CON(3), 8, GFLAGS), + + GATE(SCLK_CPU_BOOST, "clk_cpu_boost", "xin24m", CLK_IGNORE_UNUSED, + RK3308_CLKGATE_CON(3), 9, GFLAGS), + + COMPOSITE(SCLK_CRYPTO, "clk_crypto", mux_dpll_vpll0_vpll1_p, 0, + RK3308_CLKSEL_CON(7), 6, 2, MFLAGS, 0, 5, DFLAGS, + RK3308_CLKGATE_CON(1), 4, GFLAGS), + COMPOSITE(SCLK_CRYPTO_APK, "clk_crypto_apk", mux_dpll_vpll0_vpll1_p, 0, + RK3308_CLKSEL_CON(7), 14, 2, MFLAGS, 8, 5, DFLAGS, + RK3308_CLKGATE_CON(1), 5, GFLAGS), + + COMPOSITE(0, "dclk_vop_src", mux_dpll_vpll0_vpll1_p, 0, + RK3308_CLKSEL_CON(8), 10, 2, MFLAGS, 0, 8, DFLAGS, + RK3308_CLKGATE_CON(1), 6, GFLAGS), + COMPOSITE_FRACMUX(0, "dclk_vop_frac", "dclk_vop_src", CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(9), 0, + RK3308_CLKGATE_CON(1), 7, GFLAGS, + &rk3308_dclk_vop_fracmux), + GATE(DCLK_VOP, "dclk_vop", "dclk_vop_mux", 0, + RK3308_CLKGATE_CON(1), 8, GFLAGS), + + /* + * Clock-Architecture Diagram 4 + */ + + COMPOSITE_NODIV(ACLK_PERI_SRC, "clk_peri_src", mux_dpll_vpll0_vpll1_p, CLK_IGNORE_UNUSED, + RK3308_CLKSEL_CON(36), 6, 2, MFLAGS, + RK3308_CLKGATE_CON(8), 0, GFLAGS), + COMPOSITE_NOMUX(ACLK_PERI, "aclk_peri", "clk_peri_src", CLK_IGNORE_UNUSED, + RK3308_CLKSEL_CON(36), 0, 5, DFLAGS, + RK3308_CLKGATE_CON(8), 1, GFLAGS), + COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "clk_peri_src", CLK_IGNORE_UNUSED, + RK3308_CLKSEL_CON(37), 0, 5, DFLAGS, + RK3308_CLKGATE_CON(8), 2, GFLAGS), + COMPOSITE_NOMUX(PCLK_PERI, "pclk_peri", "clk_peri_src", CLK_IGNORE_UNUSED, + RK3308_CLKSEL_CON(37), 8, 5, DFLAGS, + RK3308_CLKGATE_CON(8), 3, GFLAGS), + + COMPOSITE(SCLK_NANDC_DIV, "clk_nandc_div", mux_dpll_vpll0_vpll1_p, CLK_IGNORE_UNUSED, + RK3308_CLKSEL_CON(38), 6, 2, MFLAGS, 0, 5, DFLAGS, + RK3308_CLKGATE_CON(8), 4, GFLAGS), + COMPOSITE(SCLK_NANDC_DIV50, "clk_nandc_div50", mux_dpll_vpll0_vpll1_p, CLK_IGNORE_UNUSED, + RK3308_CLKSEL_CON(38), 6, 2, MFLAGS, 0, 5, DFLAGS, + RK3308_CLKGATE_CON(8), 4, GFLAGS), + COMPOSITE_NODIV(SCLK_NANDC, 
"clk_nandc", mux_nandc_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + RK3308_CLKSEL_CON(38), 15, 1, MFLAGS, + RK3308_CLKGATE_CON(8), 5, GFLAGS), + + COMPOSITE(SCLK_SDMMC_DIV, "clk_sdmmc_div", mux_dpll_vpll0_vpll1_xin24m_p, CLK_IGNORE_UNUSED, + RK3308_CLKSEL_CON(39), 8, 2, MFLAGS, 0, 8, DFLAGS, + RK3308_CLKGATE_CON(8), 6, GFLAGS), + COMPOSITE(SCLK_SDMMC_DIV50, "clk_sdmmc_div50", mux_dpll_vpll0_vpll1_xin24m_p, CLK_IGNORE_UNUSED, + RK3308_CLKSEL_CON(39), 8, 2, MFLAGS, 0, 8, DFLAGS, + RK3308_CLKGATE_CON(8), 6, GFLAGS), + COMPOSITE_NODIV(SCLK_SDMMC, "clk_sdmmc", mux_sdmmc_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + RK3308_CLKSEL_CON(39), 15, 1, MFLAGS, + RK3308_CLKGATE_CON(8), 7, GFLAGS), + MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "clk_sdmmc", RK3308_SDMMC_CON0, 1), + MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "clk_sdmmc", RK3308_SDMMC_CON1, 1), + + COMPOSITE(SCLK_SDIO_DIV, "clk_sdio_div", mux_dpll_vpll0_vpll1_xin24m_p, CLK_IGNORE_UNUSED, + RK3308_CLKSEL_CON(40), 8, 2, MFLAGS, 0, 8, DFLAGS, + RK3308_CLKGATE_CON(8), 8, GFLAGS), + COMPOSITE(SCLK_SDIO_DIV50, "clk_sdio_div50", mux_dpll_vpll0_vpll1_xin24m_p, CLK_IGNORE_UNUSED, + RK3308_CLKSEL_CON(40), 8, 2, MFLAGS, 0, 8, DFLAGS, + RK3308_CLKGATE_CON(8), 8, GFLAGS), + COMPOSITE_NODIV(SCLK_SDIO, "clk_sdio", mux_sdio_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + RK3308_CLKSEL_CON(40), 15, 1, MFLAGS, + RK3308_CLKGATE_CON(8), 9, GFLAGS), + MMC(SCLK_SDIO_DRV, "sdio_drv", "clk_sdio", RK3308_SDIO_CON0, 1), + MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "clk_sdio", RK3308_SDIO_CON1, 1), + + COMPOSITE(SCLK_EMMC_DIV, "clk_emmc_div", mux_dpll_vpll0_vpll1_xin24m_p, CLK_IGNORE_UNUSED, + RK3308_CLKSEL_CON(41), 8, 2, MFLAGS, 0, 8, DFLAGS, + RK3308_CLKGATE_CON(8), 10, GFLAGS), + COMPOSITE(SCLK_EMMC_DIV50, "clk_emmc_div50", mux_dpll_vpll0_vpll1_xin24m_p, CLK_IGNORE_UNUSED, + RK3308_CLKSEL_CON(41), 8, 2, MFLAGS, 0, 8, DFLAGS, + RK3308_CLKGATE_CON(8), 10, GFLAGS), + COMPOSITE_NODIV(SCLK_EMMC, "clk_emmc", mux_emmc_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + RK3308_CLKSEL_CON(41), 15, 1, MFLAGS, + RK3308_CLKGATE_CON(8), 11, GFLAGS), + MMC(SCLK_EMMC_DRV, "emmc_drv", "clk_emmc", RK3308_EMMC_CON0, 1), + MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "clk_emmc", RK3308_EMMC_CON1, 1), + + COMPOSITE(SCLK_SFC, "clk_sfc", mux_dpll_vpll0_vpll1_p, 0, + RK3308_CLKSEL_CON(42), 14, 2, MFLAGS, 0, 7, DFLAGS, + RK3308_CLKGATE_CON(8), 12, GFLAGS), + + GATE(SCLK_OTG_ADP, "clk_otg_adp", "clk_rtc32k", 0, + RK3308_CLKGATE_CON(8), 13, GFLAGS), + + COMPOSITE(SCLK_MAC_SRC, "clk_mac_src", mux_dpll_vpll0_vpll1_p, 0, + RK3308_CLKSEL_CON(43), 6, 2, MFLAGS, 0, 5, DFLAGS, + RK3308_CLKGATE_CON(8), 14, GFLAGS), + MUX(SCLK_MAC, "clk_mac", mux_mac_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(43), 14, 1, MFLAGS), + GATE(SCLK_MAC_REF, "clk_mac_ref", "clk_mac", 0, + RK3308_CLKGATE_CON(9), 1, GFLAGS), + GATE(SCLK_MAC_RX_TX, "clk_mac_rx_tx", "clk_mac", 0, + RK3308_CLKGATE_CON(9), 0, GFLAGS), + FACTOR(0, "clk_mac_rx_tx_div2", "clk_mac_rx_tx", 0, 1, 2), + FACTOR(0, "clk_mac_rx_tx_div20", "clk_mac_rx_tx", 0, 1, 20), + MUX(SCLK_MAC_RMII, "clk_mac_rmii_sel", mux_mac_rmii_sel_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(43), 15, 1, MFLAGS), + + COMPOSITE(SCLK_OWIRE, "clk_owire", mux_dpll_vpll0_xin24m_p, 0, + RK3308_CLKSEL_CON(44), 14, 2, MFLAGS, 8, 6, DFLAGS, + RK3308_CLKGATE_CON(8), 15, GFLAGS), + + /* + * Clock-Architecture Diagram 5 + */ + + GATE(0, "clk_ddr_mon_timer", "xin24m", CLK_IGNORE_UNUSED, + RK3308_CLKGATE_CON(0), 12, GFLAGS), + + GATE(0, "clk_ddr_mon", "clk_ddrphy1x_out", CLK_IGNORE_UNUSED, + 
RK3308_CLKGATE_CON(4), 10, GFLAGS), + GATE(0, "clk_ddr_upctrl", "clk_ddrphy1x_out", CLK_IGNORE_UNUSED, + RK3308_CLKGATE_CON(4), 11, GFLAGS), + GATE(0, "clk_ddr_msch", "clk_ddrphy1x_out", CLK_IGNORE_UNUSED, + RK3308_CLKGATE_CON(4), 12, GFLAGS), + GATE(0, "clk_ddr_msch_peribus", "clk_ddrphy1x_out", CLK_IGNORE_UNUSED, + RK3308_CLKGATE_CON(4), 13, GFLAGS), + + COMPOSITE(SCLK_DDRCLK, "clk_ddrphy4x_src", mux_dpll_vpll0_vpll1_p, CLK_IGNORE_UNUSED, + RK3308_CLKSEL_CON(1), 6, 2, MFLAGS, 0, 3, DFLAGS, + RK3308_CLKGATE_CON(0), 10, GFLAGS), + GATE(0, "clk_ddrphy4x", "clk_ddrphy4x_src", CLK_IGNORE_UNUSED, + RK3308_CLKGATE_CON(0), 11, GFLAGS), + FACTOR_GATE(0, "clk_ddr_stdby_div4", "clk_ddrphy4x", CLK_IGNORE_UNUSED, 1, 4, + RK3308_CLKGATE_CON(0), 13, GFLAGS), + COMPOSITE_NODIV(0, "clk_ddrstdby", mux_ddrstdby_p, CLK_IGNORE_UNUSED, + RK3308_CLKSEL_CON(1), 8, 1, MFLAGS, + RK3308_CLKGATE_CON(4), 14, GFLAGS), + + /* + * Clock-Architecture Diagram 6 + */ + + GATE(PCLK_PMU, "pclk_pmu", "pclk_bus", CLK_IGNORE_UNUSED, + RK3308_CLKGATE_CON(4), 5, GFLAGS), + GATE(SCLK_PMU, "clk_pmu", "pclk_bus", CLK_IGNORE_UNUSED, + RK3308_CLKGATE_CON(4), 6, GFLAGS), + + COMPOSITE_FRACMUX(0, "clk_rtc32k_frac", "xin24m", CLK_IGNORE_UNUSED, + RK3308_CLKSEL_CON(3), 0, + RK3308_CLKGATE_CON(4), 3, GFLAGS, + &rk3308_rtc32k_fracmux), + MUX(0, "clk_rtc32k_div_src", mux_vpll0_vpll1_p, 0, + RK3308_CLKSEL_CON(2), 10, 1, MFLAGS), + COMPOSITE_NOMUX(0, "clk_rtc32k_div", "clk_rtc32k_div_src", CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(4), 0, 16, DFLAGS, + RK3308_CLKGATE_CON(4), 2, GFLAGS), + + COMPOSITE(0, "clk_usbphy_ref_src", mux_dpll_vpll0_p, 0, + RK3308_CLKSEL_CON(72), 6, 1, MFLAGS, 0, 6, DFLAGS, + RK3308_CLKGATE_CON(4), 7, GFLAGS), + COMPOSITE_NODIV(SCLK_USBPHY_REF, "clk_usbphy_ref", mux_usbphy_ref_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(72), 7, 1, MFLAGS, + RK3308_CLKGATE_CON(4), 8, GFLAGS), + + GATE(0, "clk_wifi_dpll", "dpll", 0, + RK3308_CLKGATE_CON(15), 2, GFLAGS), + GATE(0, "clk_wifi_vpll0", "vpll0", 0, + RK3308_CLKGATE_CON(15), 3, GFLAGS), + GATE(0, "clk_wifi_osc", "xin24m", 0, + RK3308_CLKGATE_CON(15), 4, GFLAGS), + COMPOSITE(0, "clk_wifi_src", mux_wifi_src_p, 0, + RK3308_CLKSEL_CON(44), 6, 1, MFLAGS, 0, 6, DFLAGS, + RK3308_CLKGATE_CON(4), 0, GFLAGS), + COMPOSITE_NODIV(SCLK_WIFI, "clk_wifi", mux_wifi_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(44), 7, 1, MFLAGS, + RK3308_CLKGATE_CON(4), 1, GFLAGS), + + GATE(SCLK_PVTM_PMU, "clk_pvtm_pmu", "xin24m", 0, + RK3308_CLKGATE_CON(4), 4, GFLAGS), + + /* + * Clock-Architecture Diagram 7 + */ + + COMPOSITE_NODIV(0, "clk_audio_src", mux_vpll0_vpll1_xin24m_p, 0, + RK3308_CLKSEL_CON(45), 6, 2, MFLAGS, + RK3308_CLKGATE_CON(10), 0, GFLAGS), + COMPOSITE_NOMUX(HCLK_AUDIO, "hclk_audio", "clk_audio_src", 0, + RK3308_CLKSEL_CON(45), 0, 5, DFLAGS, + RK3308_CLKGATE_CON(10), 1, GFLAGS), + COMPOSITE_NOMUX(PCLK_AUDIO, "pclk_audio", "clk_audio_src", 0, + RK3308_CLKSEL_CON(45), 8, 5, DFLAGS, + RK3308_CLKGATE_CON(10), 2, GFLAGS), + + COMPOSITE(0, "clk_pdm_src", mux_vpll0_vpll1_xin24m_p, 0, + RK3308_CLKSEL_CON(46), 8, 2, MFLAGS, 0, 7, DFLAGS, + RK3308_CLKGATE_CON(10), 3, GFLAGS), + COMPOSITE_FRACMUX(0, "clk_pdm_frac", "clk_pdm_src", CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(47), 0, + RK3308_CLKGATE_CON(10), 4, GFLAGS, + &rk3308_pdm_fracmux), + GATE(SCLK_PDM, "clk_pdm", "clk_pdm_mux", 0, + RK3308_CLKGATE_CON(10), 5, GFLAGS), + + COMPOSITE(SCLK_I2S0_8CH_TX_SRC, "clk_i2s0_8ch_tx_src", mux_vpll0_vpll1_xin24m_p, 0, + RK3308_CLKSEL_CON(52), 8, 2, MFLAGS, 0, 7, DFLAGS, + RK3308_CLKGATE_CON(10), 12, 
GFLAGS), + COMPOSITE_FRACMUX(0, "clk_i2s0_8ch_tx_frac", "clk_i2s0_8ch_tx_src", CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(53), 0, + RK3308_CLKGATE_CON(10), 13, GFLAGS, + &rk3308_i2s0_8ch_tx_fracmux), + COMPOSITE_NODIV(SCLK_I2S0_8CH_TX, "clk_i2s0_8ch_tx", mux_i2s0_8ch_tx_rx_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(52), 12, 1, MFLAGS, + RK3308_CLKGATE_CON(10), 14, GFLAGS), + COMPOSITE_NODIV(SCLK_I2S0_8CH_TX_OUT, "clk_i2s0_8ch_tx_out", mux_i2s0_8ch_tx_out_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(52), 15, 1, MFLAGS, + RK3308_CLKGATE_CON(10), 15, GFLAGS), + + COMPOSITE(SCLK_I2S0_8CH_RX_SRC, "clk_i2s0_8ch_rx_src", mux_vpll0_vpll1_xin24m_p, 0, + RK3308_CLKSEL_CON(54), 8, 2, MFLAGS, 0, 7, DFLAGS, + RK3308_CLKGATE_CON(11), 0, GFLAGS), + COMPOSITE_FRACMUX(0, "clk_i2s0_8ch_rx_frac", "clk_i2s0_8ch_rx_src", CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(55), 0, + RK3308_CLKGATE_CON(11), 1, GFLAGS, + &rk3308_i2s0_8ch_rx_fracmux), + COMPOSITE_NODIV(SCLK_I2S0_8CH_RX, "clk_i2s0_8ch_rx", mux_i2s0_8ch_rx_tx_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(54), 12, 1, MFLAGS, + RK3308_CLKGATE_CON(11), 2, GFLAGS), + GATE(SCLK_I2S0_8CH_RX_OUT, "clk_i2s0_8ch_rx_out", "clk_i2s0_8ch_rx", 0, + RK3308_CLKGATE_CON(11), 3, GFLAGS), + + COMPOSITE(SCLK_I2S1_8CH_TX_SRC, "clk_i2s1_8ch_tx_src", mux_vpll0_vpll1_xin24m_p, 0, + RK3308_CLKSEL_CON(56), 8, 2, MFLAGS, 0, 7, DFLAGS, + RK3308_CLKGATE_CON(11), 4, GFLAGS), + COMPOSITE_FRACMUX(0, "clk_i2s1_8ch_tx_frac", "clk_i2s1_8ch_tx_src", CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(57), 0, + RK3308_CLKGATE_CON(11), 5, GFLAGS, + &rk3308_i2s1_8ch_tx_fracmux), + COMPOSITE_NODIV(SCLK_I2S1_8CH_TX, "clk_i2s1_8ch_tx", mux_i2s1_8ch_tx_rx_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(56), 12, 1, MFLAGS, + RK3308_CLKGATE_CON(11), 6, GFLAGS), + COMPOSITE_NODIV(SCLK_I2S1_8CH_TX_OUT, "clk_i2s1_8ch_tx_out", mux_i2s1_8ch_tx_out_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(56), 15, 1, MFLAGS, + RK3308_CLKGATE_CON(11), 7, GFLAGS), + + COMPOSITE(SCLK_I2S1_8CH_RX_SRC, "clk_i2s1_8ch_rx_src", mux_vpll0_vpll1_xin24m_p, 0, + RK3308_CLKSEL_CON(58), 8, 2, MFLAGS, 0, 7, DFLAGS, + RK3308_CLKGATE_CON(11), 8, GFLAGS), + COMPOSITE_FRACMUX(0, "clk_i2s1_8ch_rx_frac", "clk_i2s1_8ch_rx_src", CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(59), 0, + RK3308_CLKGATE_CON(11), 9, GFLAGS, + &rk3308_i2s1_8ch_rx_fracmux), + COMPOSITE_NODIV(SCLK_I2S1_8CH_RX, "clk_i2s1_8ch_rx", mux_i2s1_8ch_rx_tx_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(58), 12, 1, MFLAGS, + RK3308_CLKGATE_CON(11), 10, GFLAGS), + GATE(SCLK_I2S1_8CH_RX_OUT, "clk_i2s1_8ch_rx_out", "clk_i2s1_8ch_rx", 0, + RK3308_CLKGATE_CON(11), 11, GFLAGS), + + COMPOSITE(SCLK_I2S2_8CH_TX_SRC, "clk_i2s2_8ch_tx_src", mux_vpll0_vpll1_xin24m_p, 0, + RK3308_CLKSEL_CON(60), 8, 2, MFLAGS, 0, 7, DFLAGS, + RK3308_CLKGATE_CON(11), 12, GFLAGS), + COMPOSITE_FRACMUX(0, "clk_i2s2_8ch_tx_frac", "clk_i2s2_8ch_tx_src", CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(61), 0, + RK3308_CLKGATE_CON(11), 13, GFLAGS, + &rk3308_i2s2_8ch_tx_fracmux), + COMPOSITE_NODIV(SCLK_I2S2_8CH_TX, "clk_i2s2_8ch_tx", mux_i2s2_8ch_tx_rx_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(60), 12, 1, MFLAGS, + RK3308_CLKGATE_CON(11), 14, GFLAGS), + COMPOSITE_NODIV(SCLK_I2S2_8CH_TX_OUT, "clk_i2s2_8ch_tx_out", mux_i2s2_8ch_tx_out_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(60), 15, 1, MFLAGS, + RK3308_CLKGATE_CON(11), 15, GFLAGS), + + COMPOSITE(SCLK_I2S2_8CH_RX_SRC, "clk_i2s2_8ch_rx_src", mux_vpll0_vpll1_xin24m_p, 0, + RK3308_CLKSEL_CON(62), 8, 2, MFLAGS, 0, 7, DFLAGS, + RK3308_CLKGATE_CON(12), 0, GFLAGS), + COMPOSITE_FRACMUX(0, 
"clk_i2s2_8ch_rx_frac", "clk_i2s2_8ch_rx_src", CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(63), 0, + RK3308_CLKGATE_CON(12), 1, GFLAGS, + &rk3308_i2s2_8ch_rx_fracmux), + COMPOSITE_NODIV(SCLK_I2S2_8CH_RX, "clk_i2s2_8ch_rx", mux_i2s2_8ch_rx_tx_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(62), 12, 1, MFLAGS, + RK3308_CLKGATE_CON(12), 2, GFLAGS), + GATE(SCLK_I2S2_8CH_RX_OUT, "clk_i2s2_8ch_rx_out", "clk_i2s2_8ch_rx", 0, + RK3308_CLKGATE_CON(12), 3, GFLAGS), + + COMPOSITE(SCLK_I2S3_8CH_TX_SRC, "clk_i2s3_8ch_tx_src", mux_vpll0_vpll1_xin24m_p, 0, + RK3308_CLKSEL_CON(64), 8, 2, MFLAGS, 0, 7, DFLAGS, + RK3308_CLKGATE_CON(12), 4, GFLAGS), + COMPOSITE_FRACMUX(0, "clk_i2s3_8ch_tx_frac", "clk_i2s3_8ch_tx_src", CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(65), 0, + RK3308_CLKGATE_CON(12), 5, GFLAGS, + &rk3308_i2s3_8ch_tx_fracmux), + COMPOSITE_NODIV(SCLK_I2S3_8CH_TX, "clk_i2s3_8ch_tx", mux_i2s3_8ch_tx_rx_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(64), 12, 1, MFLAGS, + RK3308_CLKGATE_CON(12), 6, GFLAGS), + COMPOSITE_NODIV(SCLK_I2S3_8CH_TX_OUT, "clk_i2s3_8ch_tx_out", mux_i2s3_8ch_tx_out_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(64), 15, 1, MFLAGS, + RK3308_CLKGATE_CON(12), 7, GFLAGS), + + COMPOSITE(SCLK_I2S3_8CH_RX_SRC, "clk_i2s3_8ch_rx_src", mux_vpll0_vpll1_xin24m_p, 0, + RK3308_CLKSEL_CON(66), 8, 2, MFLAGS, 0, 7, DFLAGS, + RK3308_CLKGATE_CON(12), 8, GFLAGS), + COMPOSITE_FRACMUX(0, "clk_i2s3_8ch_rx_frac", "clk_i2s3_8ch_rx_src", CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(67), 0, + RK3308_CLKGATE_CON(12), 9, GFLAGS, + &rk3308_i2s3_8ch_rx_fracmux), + COMPOSITE_NODIV(SCLK_I2S3_8CH_RX, "clk_i2s3_8ch_rx", mux_i2s3_8ch_rx_tx_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(66), 12, 1, MFLAGS, + RK3308_CLKGATE_CON(12), 10, GFLAGS), + GATE(SCLK_I2S3_8CH_RX_OUT, "clk_i2s3_8ch_rx_out", "clk_i2s3_8ch_rx", 0, + RK3308_CLKGATE_CON(12), 11, GFLAGS), + + COMPOSITE(SCLK_I2S0_2CH_SRC, "clk_i2s0_2ch_src", mux_vpll0_vpll1_xin24m_p, 0, + RK3308_CLKSEL_CON(68), 8, 2, MFLAGS, 0, 7, DFLAGS, + RK3308_CLKGATE_CON(12), 12, GFLAGS), + COMPOSITE_FRACMUX(0, "clk_i2s0_2ch_frac", "clk_i2s0_2ch_src", CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(69), 0, + RK3308_CLKGATE_CON(12), 13, GFLAGS, + &rk3308_i2s0_2ch_fracmux), + GATE(SCLK_I2S0_2CH, "clk_i2s0_2ch", "clk_i2s0_2ch_mux", 0, + RK3308_CLKGATE_CON(12), 14, GFLAGS), + COMPOSITE_NODIV(SCLK_I2S0_2CH_OUT, "clk_i2s0_2ch_out", mux_i2s0_2ch_out_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(68), 15, 1, MFLAGS, + RK3308_CLKGATE_CON(12), 15, GFLAGS), + + COMPOSITE(SCLK_I2S1_2CH_SRC, "clk_i2s1_2ch_src", mux_vpll0_vpll1_xin24m_p, 0, + RK3308_CLKSEL_CON(70), 8, 2, MFLAGS, 0, 7, DFLAGS, + RK3308_CLKGATE_CON(13), 0, GFLAGS), + COMPOSITE_FRACMUX(0, "clk_i2s1_2ch_frac", "clk_i2s1_2ch_src", CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(71), 0, + RK3308_CLKGATE_CON(13), 1, GFLAGS, + &rk3308_i2s1_2ch_fracmux), + GATE(SCLK_I2S1_2CH, "clk_i2s1_2ch", "clk_i2s1_2ch_mux", 0, + RK3308_CLKGATE_CON(13), 2, GFLAGS), + COMPOSITE_NODIV(SCLK_I2S1_2CH_OUT, "clk_i2s1_2ch_out", mux_i2s1_2ch_out_p, CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(70), 15, 1, MFLAGS, + RK3308_CLKGATE_CON(13), 3, GFLAGS), + + COMPOSITE(SCLK_SPDIF_TX_DIV, "clk_spdif_tx_div", mux_vpll0_vpll1_xin24m_p, CLK_IGNORE_UNUSED, + RK3308_CLKSEL_CON(48), 8, 2, MFLAGS, 0, 7, DFLAGS, + RK3308_CLKGATE_CON(10), 6, GFLAGS), + COMPOSITE(SCLK_SPDIF_TX_DIV50, "clk_spdif_tx_div50", mux_vpll0_vpll1_xin24m_p, CLK_IGNORE_UNUSED, + RK3308_CLKSEL_CON(48), 8, 2, MFLAGS, 0, 7, DFLAGS, + RK3308_CLKGATE_CON(10), 6, GFLAGS), + MUX(0, "clk_spdif_tx_src", mux_spdif_tx_src_p, CLK_SET_RATE_PARENT | 
CLK_SET_RATE_NO_REPARENT, + RK3308_CLKSEL_CON(48), 12, 1, MFLAGS), + COMPOSITE_FRACMUX(0, "clk_spdif_tx_frac", "clk_spdif_tx_src", CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(49), 0, + RK3308_CLKGATE_CON(10), 7, GFLAGS, + &rk3308_spdif_tx_fracmux), + GATE(SCLK_SPDIF_TX, "clk_spdif_tx", "clk_spdif_tx_mux", 0, + RK3308_CLKGATE_CON(10), 8, GFLAGS), + + COMPOSITE(SCLK_SPDIF_RX_DIV, "clk_spdif_rx_div", mux_vpll0_vpll1_xin24m_p, CLK_IGNORE_UNUSED, + RK3308_CLKSEL_CON(50), 8, 2, MFLAGS, 0, 7, DFLAGS, + RK3308_CLKGATE_CON(10), 9, GFLAGS), + COMPOSITE(SCLK_SPDIF_RX_DIV50, "clk_spdif_rx_div50", mux_vpll0_vpll1_xin24m_p, CLK_IGNORE_UNUSED, + RK3308_CLKSEL_CON(50), 8, 2, MFLAGS, 0, 7, DFLAGS, + RK3308_CLKGATE_CON(10), 9, GFLAGS), + MUX(0, "clk_spdif_rx_src", mux_spdif_rx_src_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + RK3308_CLKSEL_CON(50), 14, 1, MFLAGS), + COMPOSITE_FRACMUX(0, "clk_spdif_rx_frac", "clk_spdif_rx_src", CLK_SET_RATE_PARENT, + RK3308_CLKSEL_CON(51), 0, + RK3308_CLKGATE_CON(10), 10, GFLAGS, + &rk3308_spdif_rx_fracmux), + GATE(SCLK_SPDIF_RX, "clk_spdif_rx", "clk_spdif_rx_mux", 0, + RK3308_CLKGATE_CON(10), 11, GFLAGS), + + /* + * Clock-Architecture Diagram 8 + */ + + GATE(0, "aclk_core_niu", "aclk_core", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(0), 5, GFLAGS), + GATE(0, "pclk_core_dbg_niu", "aclk_core", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(0), 6, GFLAGS), + GATE(0, "pclk_core_dbg_daplite", "pclk_core_dbg", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(0), 7, GFLAGS), + GATE(0, "aclk_core_perf", "pclk_core_dbg", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(0), 8, GFLAGS), + GATE(0, "pclk_core_grf", "pclk_core_dbg", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(0), 9, GFLAGS), + + GATE(0, "aclk_peri_niu", "aclk_peri", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(9), 2, GFLAGS), + GATE(0, "aclk_peribus_niu", "aclk_peri", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(9), 3, GFLAGS), + GATE(ACLK_MAC, "aclk_mac", "aclk_peri", 0, RK3308_CLKGATE_CON(9), 4, GFLAGS), + + GATE(0, "hclk_peri_niu", "hclk_peri", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(9), 5, GFLAGS), + GATE(HCLK_NANDC, "hclk_nandc", "hclk_peri", 0, RK3308_CLKGATE_CON(9), 6, GFLAGS), + GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK3308_CLKGATE_CON(9), 7, GFLAGS), + GATE(HCLK_SDIO, "hclk_sdio", "hclk_peri", 0, RK3308_CLKGATE_CON(9), 8, GFLAGS), + GATE(HCLK_EMMC, "hclk_emmc", "hclk_peri", 0, RK3308_CLKGATE_CON(9), 9, GFLAGS), + GATE(HCLK_SFC, "hclk_sfc", "hclk_peri", 0, RK3308_CLKGATE_CON(9), 10, GFLAGS), + GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK3308_CLKGATE_CON(9), 11, GFLAGS), + GATE(HCLK_HOST, "hclk_host", "hclk_peri", 0, RK3308_CLKGATE_CON(9), 12, GFLAGS), + GATE(HCLK_HOST_ARB, "hclk_host_arb", "hclk_peri", 0, RK3308_CLKGATE_CON(9), 13, GFLAGS), + + GATE(0, "pclk_peri_niu", "pclk_peri", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(9), 14, GFLAGS), + GATE(PCLK_MAC, "pclk_mac", "pclk_peri", 0, RK3308_CLKGATE_CON(9), 15, GFLAGS), + + GATE(0, "hclk_audio_niu", "hclk_audio", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(14), 0, GFLAGS), + GATE(HCLK_PDM, "hclk_pdm", "hclk_audio", 0, RK3308_CLKGATE_CON(14), 1, GFLAGS), + GATE(HCLK_SPDIFTX, "hclk_spdiftx", "hclk_audio", 0, RK3308_CLKGATE_CON(14), 2, GFLAGS), + GATE(HCLK_SPDIFRX, "hclk_spdifrx", "hclk_audio", 0, RK3308_CLKGATE_CON(14), 3, GFLAGS), + GATE(HCLK_I2S0_8CH, "hclk_i2s0_8ch", "hclk_audio", 0, RK3308_CLKGATE_CON(14), 4, GFLAGS), + GATE(HCLK_I2S1_8CH, "hclk_i2s1_8ch", "hclk_audio", 0, RK3308_CLKGATE_CON(14), 5, GFLAGS), + GATE(HCLK_I2S2_8CH, "hclk_i2s2_8ch", "hclk_audio", 0, RK3308_CLKGATE_CON(14), 6, GFLAGS), + 
GATE(HCLK_I2S3_8CH, "hclk_i2s3_8ch", "hclk_audio", 0, RK3308_CLKGATE_CON(14), 7, GFLAGS), + GATE(HCLK_I2S0_2CH, "hclk_i2s0_2ch", "hclk_audio", 0, RK3308_CLKGATE_CON(14), 8, GFLAGS), + GATE(HCLK_I2S1_2CH, "hclk_i2s1_2ch", "hclk_audio", 0, RK3308_CLKGATE_CON(14), 9, GFLAGS), + GATE(HCLK_VAD, "hclk_vad", "hclk_audio", 0, RK3308_CLKGATE_CON(14), 10, GFLAGS), + + GATE(0, "pclk_audio_niu", "pclk_audio", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(14), 11, GFLAGS), + GATE(PCLK_ACODEC, "pclk_acodec", "pclk_audio", 0, RK3308_CLKGATE_CON(14), 12, GFLAGS), + + GATE(0, "aclk_bus_niu", "aclk_bus", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(5), 0, GFLAGS), + GATE(0, "aclk_intmem", "aclk_bus", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(5), 1, GFLAGS), + GATE(ACLK_CRYPTO, "aclk_crypto", "aclk_bus", 0, RK3308_CLKGATE_CON(5), 2, GFLAGS), + GATE(ACLK_VOP, "aclk_vop", "aclk_bus", 0, RK3308_CLKGATE_CON(5), 3, GFLAGS), + GATE(0, "aclk_gic", "aclk_bus", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(5), 4, GFLAGS), + /* aclk_dmac0 is controlled by sgrf_clkgat_con. */ + SGRF_GATE(ACLK_DMAC0, "aclk_dmac0", "aclk_bus"), + /* aclk_dmac1 is controlled by sgrf_clkgat_con. */ + SGRF_GATE(ACLK_DMAC1, "aclk_dmac1", "aclk_bus"), + /* watchdog pclk is controlled by sgrf_clkgat_con. */ + SGRF_GATE(PCLK_WDT, "pclk_wdt", "pclk_bus"), + + GATE(0, "hclk_bus_niu", "hclk_bus", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(5), 5, GFLAGS), + GATE(0, "hclk_rom", "hclk_bus", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(5), 6, GFLAGS), + GATE(HCLK_CRYPTO, "hclk_crypto", "hclk_bus", 0, RK3308_CLKGATE_CON(5), 7, GFLAGS), + GATE(HCLK_VOP, "hclk_vop", "hclk_bus", 0, RK3308_CLKGATE_CON(5), 8, GFLAGS), + + GATE(0, "pclk_bus_niu", "pclk_bus", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(5), 9, GFLAGS), + GATE(PCLK_UART0, "pclk_uart0", "pclk_bus", 0, RK3308_CLKGATE_CON(5), 10, GFLAGS), + GATE(PCLK_UART1, "pclk_uart1", "pclk_bus", 0, RK3308_CLKGATE_CON(5), 11, GFLAGS), + GATE(PCLK_UART2, "pclk_uart2", "pclk_bus", 0, RK3308_CLKGATE_CON(5), 12, GFLAGS), + GATE(PCLK_UART3, "pclk_uart3", "pclk_bus", 0, RK3308_CLKGATE_CON(5), 13, GFLAGS), + GATE(PCLK_UART4, "pclk_uart4", "pclk_bus", 0, RK3308_CLKGATE_CON(5), 14, GFLAGS), + GATE(PCLK_I2C0, "pclk_i2c0", "pclk_bus", 0, RK3308_CLKGATE_CON(5), 15, GFLAGS), + GATE(PCLK_I2C1, "pclk_i2c1", "pclk_bus", 0, RK3308_CLKGATE_CON(6), 0, GFLAGS), + GATE(PCLK_I2C2, "pclk_i2c2", "pclk_bus", 0, RK3308_CLKGATE_CON(6), 1, GFLAGS), + GATE(PCLK_I2C3, "pclk_i2c3", "pclk_bus", 0, RK3308_CLKGATE_CON(6), 2, GFLAGS), + GATE(PCLK_PWM0, "pclk_pwm0", "pclk_bus", 0, RK3308_CLKGATE_CON(6), 3, GFLAGS), + GATE(PCLK_SPI0, "pclk_spi0", "pclk_bus", 0, RK3308_CLKGATE_CON(6), 4, GFLAGS), + GATE(PCLK_SPI1, "pclk_spi1", "pclk_bus", 0, RK3308_CLKGATE_CON(6), 5, GFLAGS), + GATE(PCLK_SPI2, "pclk_spi2", "pclk_bus", 0, RK3308_CLKGATE_CON(6), 6, GFLAGS), + GATE(PCLK_SARADC, "pclk_saradc", "pclk_bus", 0, RK3308_CLKGATE_CON(6), 7, GFLAGS), + GATE(PCLK_TSADC, "pclk_tsadc", "pclk_bus", 0, RK3308_CLKGATE_CON(6), 8, GFLAGS), + GATE(PCLK_TIMER, "pclk_timer", "pclk_bus", 0, RK3308_CLKGATE_CON(6), 9, GFLAGS), + GATE(PCLK_OTP_NS, "pclk_otp_ns", "pclk_bus", 0, RK3308_CLKGATE_CON(6), 10, GFLAGS), + GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_bus", 0, RK3308_CLKGATE_CON(6), 12, GFLAGS), + GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_bus", 0, RK3308_CLKGATE_CON(6), 13, GFLAGS), + GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_bus", 0, RK3308_CLKGATE_CON(6), 14, GFLAGS), + GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_bus", 0, RK3308_CLKGATE_CON(6), 15, GFLAGS), + GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_bus", 0, RK3308_CLKGATE_CON(7), 0, 
GFLAGS), + GATE(PCLK_SGRF, "pclk_sgrf", "pclk_bus", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(7), 1, GFLAGS), + GATE(PCLK_GRF, "pclk_grf", "pclk_bus", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(7), 2, GFLAGS), + GATE(PCLK_USBSD_DET, "pclk_usbsd_det", "pclk_bus", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(7), 3, GFLAGS), + GATE(PCLK_DDR_UPCTL, "pclk_ddr_upctl", "pclk_bus", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(7), 4, GFLAGS), + GATE(PCLK_DDR_MON, "pclk_ddr_mon", "pclk_bus", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(7), 5, GFLAGS), + GATE(PCLK_DDRPHY, "pclk_ddrphy", "pclk_bus", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(7), 6, GFLAGS), + GATE(PCLK_DDR_STDBY, "pclk_ddr_stdby", "pclk_bus", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(7), 7, GFLAGS), + GATE(PCLK_USB_GRF, "pclk_usb_grf", "pclk_bus", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(7), 8, GFLAGS), + GATE(PCLK_CRU, "pclk_cru", "pclk_bus", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(7), 9, GFLAGS), + GATE(PCLK_OTP_PHY, "pclk_otp_phy", "pclk_bus", 0, RK3308_CLKGATE_CON(7), 10, GFLAGS), + GATE(PCLK_CPU_BOOST, "pclk_cpu_boost", "pclk_bus", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(7), 11, GFLAGS), + GATE(PCLK_PWM1, "pclk_pwm1", "pclk_bus", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(7), 12, GFLAGS), + GATE(PCLK_PWM2, "pclk_pwm2", "pclk_bus", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(7), 13, GFLAGS), + GATE(PCLK_CAN, "pclk_can", "pclk_bus", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(7), 14, GFLAGS), + GATE(PCLK_OWIRE, "pclk_owire", "pclk_bus", CLK_IGNORE_UNUSED, RK3308_CLKGATE_CON(7), 15, GFLAGS), +}; + +static const char *const rk3308_critical_clocks[] __initconst = { + "aclk_bus", + "hclk_bus", + "pclk_bus", + "aclk_peri", + "hclk_peri", + "pclk_peri", + "hclk_audio", + "pclk_audio", + "sclk_ddrc", +}; + +static void __init rk3308_clk_init(struct device_node *np) +{ + struct rockchip_clk_provider *ctx; + void __iomem *reg_base; + + reg_base = of_iomap(np, 0); + if (!reg_base) { + pr_err("%s: could not map cru region\n", __func__); + return; + } + + ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS); + if (IS_ERR(ctx)) { + pr_err("%s: rockchip clk init failed\n", __func__); + iounmap(reg_base); + return; + } + + rockchip_clk_register_plls(ctx, rk3308_pll_clks, + ARRAY_SIZE(rk3308_pll_clks), + RK3308_GRF_SOC_STATUS0); + rockchip_clk_register_branches(ctx, rk3308_clk_branches, + ARRAY_SIZE(rk3308_clk_branches)); + rockchip_clk_protect_critical(rk3308_critical_clocks, + ARRAY_SIZE(rk3308_critical_clocks)); + + rockchip_clk_register_armclk(ctx, ARMCLK, "armclk", + mux_armclk_p, ARRAY_SIZE(mux_armclk_p), + &rk3308_cpuclk_data, rk3308_cpuclk_rates, + ARRAY_SIZE(rk3308_cpuclk_rates)); + + rockchip_register_softrst(np, 10, reg_base + RK3308_SOFTRST_CON(0), + ROCKCHIP_SOFTRST_HIWORD_MASK); + + rockchip_register_restart_notifier(ctx, RK3308_GLB_SRST_FST, NULL); + + rockchip_clk_of_add_provider(np, ctx); +} + +CLK_OF_DECLARE(rk3308_cru, "rockchip,rk3308-cru", rk3308_clk_init); diff --git a/drivers/clk/rockchip/clk-rv1108.c b/drivers/clk/rockchip/clk-rv1108.c index 96cc6af5632c49..5947d319286682 100644 --- a/drivers/clk/rockchip/clk-rv1108.c +++ b/drivers/clk/rockchip/clk-rv1108.c @@ -122,7 +122,6 @@ PNAME(mux_usb480m_pre_p) = { "usbphy", "xin24m" }; PNAME(mux_hdmiphy_phy_p) = { "hdmiphy", "xin24m" }; PNAME(mux_dclk_hdmiphy_pre_p) = { "dclk_hdmiphy_src_gpll", "dclk_hdmiphy_src_dpll" }; PNAME(mux_pll_src_4plls_p) = { "dpll", "gpll", "hdmiphy", "usb480m" }; -PNAME(mux_pll_src_3plls_p) = { "apll", "gpll", "dpll" }; PNAME(mux_pll_src_2plls_p) = { "dpll", "gpll" }; PNAME(mux_pll_src_apll_gpll_p) = { "apll", "gpll" 
}; PNAME(mux_aclk_peri_src_p) = { "aclk_peri_src_gpll", "aclk_peri_src_dpll" }; diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h index b811597a3d38ea..2271a84124b07f 100644 --- a/drivers/clk/rockchip/clk.h +++ b/drivers/clk/rockchip/clk.h @@ -121,6 +121,19 @@ struct clk; #define RK3288_EMMC_CON0 0x218 #define RK3288_EMMC_CON1 0x21c +#define RK3308_PLL_CON(x) RK2928_PLL_CON(x) +#define RK3308_CLKSEL_CON(x) ((x) * 0x4 + 0x100) +#define RK3308_CLKGATE_CON(x) ((x) * 0x4 + 0x300) +#define RK3308_GLB_SRST_FST 0xb8 +#define RK3308_SOFTRST_CON(x) ((x) * 0x4 + 0x400) +#define RK3308_MODE_CON 0xa0 +#define RK3308_SDMMC_CON0 0x480 +#define RK3308_SDMMC_CON1 0x484 +#define RK3308_SDIO_CON0 0x488 +#define RK3308_SDIO_CON1 0x48c +#define RK3308_EMMC_CON0 0x490 +#define RK3308_EMMC_CON1 0x494 + #define RK3328_PLL_CON(x) RK2928_PLL_CON(x) #define RK3328_CLKSEL_CON(x) ((x) * 0x4 + 0x100) #define RK3328_CLKGATE_CON(x) ((x) * 0x4 + 0x200) diff --git a/drivers/clk/sirf/clk-common.c b/drivers/clk/sirf/clk-common.c index ad7951b6b285eb..dcf4e25a021685 100644 --- a/drivers/clk/sirf/clk-common.c +++ b/drivers/clk/sirf/clk-common.c @@ -297,9 +297,10 @@ static u8 dmn_clk_get_parent(struct clk_hw *hw) { struct clk_dmn *clk = to_dmnclk(hw); u32 cfg = clkc_readl(clk->regofs); + const char *name = clk_hw_get_name(hw); /* parent of io domain can only be pll3 */ - if (strcmp(hw->init->name, "io") == 0) + if (strcmp(name, "io") == 0) return 4; WARN_ON((cfg & (BIT(3) - 1)) > 4); @@ -311,9 +312,10 @@ static int dmn_clk_set_parent(struct clk_hw *hw, u8 parent) { struct clk_dmn *clk = to_dmnclk(hw); u32 cfg = clkc_readl(clk->regofs); + const char *name = clk_hw_get_name(hw); /* parent of io domain can only be pll3 */ - if (strcmp(hw->init->name, "io") == 0) + if (strcmp(name, "io") == 0) return -EINVAL; cfg &= ~(BIT(3) - 1); @@ -353,7 +355,8 @@ static long dmn_clk_round_rate(struct clk_hw *hw, unsigned long rate, { unsigned long fin; unsigned ratio, wait, hold; - unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4; + const char *name = clk_hw_get_name(hw); + unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4; fin = *parent_rate; ratio = fin / rate; @@ -375,7 +378,8 @@ static int dmn_clk_set_rate(struct clk_hw *hw, unsigned long rate, struct clk_dmn *clk = to_dmnclk(hw); unsigned long fin; unsigned ratio, wait, hold, reg; - unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4; + const char *name = clk_hw_get_name(hw); + unsigned bits = (strcmp(name, "mem") == 0) ? 
3 : 4; fin = parent_rate; ratio = fin / rate; diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c index 3966cd43b55273..43ecd507bf836b 100644 --- a/drivers/clk/socfpga/clk-gate.c +++ b/drivers/clk/socfpga/clk-gate.c @@ -30,22 +30,23 @@ static u8 socfpga_clk_get_parent(struct clk_hw *hwclk) { u32 l4_src; u32 perpll_src; + const char *name = clk_hw_get_name(hwclk); - if (streq(hwclk->init->name, SOCFPGA_L4_MP_CLK)) { + if (streq(name, SOCFPGA_L4_MP_CLK)) { l4_src = readl(clk_mgr_base_addr + CLKMGR_L4SRC); return l4_src &= 0x1; } - if (streq(hwclk->init->name, SOCFPGA_L4_SP_CLK)) { + if (streq(name, SOCFPGA_L4_SP_CLK)) { l4_src = readl(clk_mgr_base_addr + CLKMGR_L4SRC); return !!(l4_src & 2); } perpll_src = readl(clk_mgr_base_addr + CLKMGR_PERPLL_SRC); - if (streq(hwclk->init->name, SOCFPGA_MMC_CLK)) + if (streq(name, SOCFPGA_MMC_CLK)) return perpll_src &= 0x3; - if (streq(hwclk->init->name, SOCFPGA_NAND_CLK) || - streq(hwclk->init->name, SOCFPGA_NAND_X_CLK)) - return (perpll_src >> 2) & 3; + if (streq(name, SOCFPGA_NAND_CLK) || + streq(name, SOCFPGA_NAND_X_CLK)) + return (perpll_src >> 2) & 3; /* QSPI clock */ return (perpll_src >> 4) & 3; @@ -55,24 +56,25 @@ static u8 socfpga_clk_get_parent(struct clk_hw *hwclk) static int socfpga_clk_set_parent(struct clk_hw *hwclk, u8 parent) { u32 src_reg; + const char *name = clk_hw_get_name(hwclk); - if (streq(hwclk->init->name, SOCFPGA_L4_MP_CLK)) { + if (streq(name, SOCFPGA_L4_MP_CLK)) { src_reg = readl(clk_mgr_base_addr + CLKMGR_L4SRC); src_reg &= ~0x1; src_reg |= parent; writel(src_reg, clk_mgr_base_addr + CLKMGR_L4SRC); - } else if (streq(hwclk->init->name, SOCFPGA_L4_SP_CLK)) { + } else if (streq(name, SOCFPGA_L4_SP_CLK)) { src_reg = readl(clk_mgr_base_addr + CLKMGR_L4SRC); src_reg &= ~0x2; src_reg |= (parent << 1); writel(src_reg, clk_mgr_base_addr + CLKMGR_L4SRC); } else { src_reg = readl(clk_mgr_base_addr + CLKMGR_PERPLL_SRC); - if (streq(hwclk->init->name, SOCFPGA_MMC_CLK)) { + if (streq(name, SOCFPGA_MMC_CLK)) { src_reg &= ~0x3; src_reg |= parent; - } else if (streq(hwclk->init->name, SOCFPGA_NAND_CLK) || - streq(hwclk->init->name, SOCFPGA_NAND_X_CLK)) { + } else if (streq(name, SOCFPGA_NAND_CLK) || + streq(name, SOCFPGA_NAND_X_CLK)) { src_reg &= ~0xC; src_reg |= (parent << 2); } else {/* QSPI clock */ diff --git a/drivers/clk/socfpga/clk-periph-a10.c b/drivers/clk/socfpga/clk-periph-a10.c index a8ff7229611d98..3e0c55727b89cf 100644 --- a/drivers/clk/socfpga/clk-periph-a10.c +++ b/drivers/clk/socfpga/clk-periph-a10.c @@ -40,11 +40,12 @@ static u8 clk_periclk_get_parent(struct clk_hw *hwclk) { struct socfpga_periph_clk *socfpgaclk = to_socfpga_periph_clk(hwclk); u32 clk_src; + const char *name = clk_hw_get_name(hwclk); clk_src = readl(socfpgaclk->hw.reg); - if (streq(hwclk->init->name, SOCFPGA_MPU_FREE_CLK) || - streq(hwclk->init->name, SOCFPGA_NOC_FREE_CLK) || - streq(hwclk->init->name, SOCFPGA_SDMMC_FREE_CLK)) + if (streq(name, SOCFPGA_MPU_FREE_CLK) || + streq(name, SOCFPGA_NOC_FREE_CLK) || + streq(name, SOCFPGA_SDMMC_FREE_CLK)) return (clk_src >> CLK_MGR_FREE_SHIFT) & CLK_MGR_FREE_MASK; else diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c index e5bc8c828cf0d8..9163bbb464112e 100644 --- a/drivers/clk/spear/spear1340_clock.c +++ b/drivers/clk/spear/spear1340_clock.c @@ -335,7 +335,7 @@ static const struct aux_clk_masks i2s_prs1_masks = { }; /* i2s sclk (bit clock) syynthesizers masks */ -static struct aux_clk_masks i2s_sclk_masks = { +static const struct aux_clk_masks 
i2s_sclk_masks = { .eq_sel_mask = AUX_EQ_SEL_MASK, .eq_sel_shift = SPEAR1340_I2S_SCLK_EQ_SEL_SHIFT, .eq1_mask = AUX_EQ1_SEL, diff --git a/drivers/clk/sprd/common.c b/drivers/clk/sprd/common.c index a5bdca1de5d074..9d56eac43832ac 100644 --- a/drivers/clk/sprd/common.c +++ b/drivers/clk/sprd/common.c @@ -76,16 +76,17 @@ int sprd_clk_probe(struct device *dev, struct clk_hw_onecell_data *clkhw) struct clk_hw *hw; for (i = 0; i < clkhw->num; i++) { + const char *name; hw = clkhw->hws[i]; - if (!hw) continue; + name = hw->init->name; ret = devm_clk_hw_register(dev, hw); if (ret) { dev_err(dev, "Couldn't register clock %d - %s\n", - i, hw->init->name); + i, name); return ret; } } diff --git a/drivers/clk/sprd/pll.c b/drivers/clk/sprd/pll.c index 36b4402bf09e36..640270f51aa56c 100644 --- a/drivers/clk/sprd/pll.c +++ b/drivers/clk/sprd/pll.c @@ -136,6 +136,7 @@ static unsigned long _sprd_pll_recalc_rate(const struct sprd_pll *pll, k2 + refin * nint * CLK_PLL_1M; } + kfree(cfg); return rate; } @@ -222,6 +223,7 @@ static int _sprd_pll_set_rate(const struct sprd_pll *pll, if (!ret) udelay(pll->udelay); + kfree(cfg); return ret; } diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c index d18e49b4976f53..4413b6e04a8ecb 100644 --- a/drivers/clk/st/clk-flexgen.c +++ b/drivers/clk/st/clk-flexgen.c @@ -326,6 +326,7 @@ static void __init st_of_flexgen_setup(struct device_node *np) return; reg = of_iomap(pnode, 0); + of_node_put(pnode); if (!reg) return; diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c index ca1ccdb8a3b18a..a156bd0c6af751 100644 --- a/drivers/clk/st/clkgen-fsyn.c +++ b/drivers/clk/st/clkgen-fsyn.c @@ -67,7 +67,6 @@ struct clkgen_quadfs_data { }; static const struct clk_ops st_quadfs_pll_c32_ops; -static const struct clk_ops st_quadfs_fs660c32_ops; static int clk_fs660c32_dig_get_params(unsigned long input, unsigned long output, struct stm_fs *fs); diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c index d8a688bd45ecf1..c3952f2c42ba26 100644 --- a/drivers/clk/st/clkgen-pll.c +++ b/drivers/clk/st/clkgen-pll.c @@ -61,19 +61,6 @@ static const struct clk_ops stm_pll3200c32_ops; static const struct clk_ops stm_pll3200c32_a9_ops; static const struct clk_ops stm_pll4600c28_ops; -static const struct clkgen_pll_data st_pll3200c32_407_a0 = { - /* 407 A0 */ - .pdn_status = CLKGEN_FIELD(0x2a0, 0x1, 8), - .pdn_ctrl = CLKGEN_FIELD(0x2a0, 0x1, 8), - .locked_status = CLKGEN_FIELD(0x2a0, 0x1, 24), - .ndiv = CLKGEN_FIELD(0x2a4, C32_NDIV_MASK, 16), - .idf = CLKGEN_FIELD(0x2a4, C32_IDF_MASK, 0x0), - .num_odfs = 1, - .odf = { CLKGEN_FIELD(0x2b4, C32_ODF_MASK, 0) }, - .odf_gate = { CLKGEN_FIELD(0x2b4, 0x1, 6) }, - .ops = &stm_pll3200c32_ops, -}; - static const struct clkgen_pll_data st_pll3200c32_cx_0 = { /* 407 C0 PLL0 */ .pdn_status = CLKGEN_FIELD(0x2a0, 0x1, 8), diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c index aebef4af98613f..d89353a3cdec72 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c @@ -505,7 +505,7 @@ static struct ccu_div i2s3_clk = { .hw.init = CLK_HW_INIT_PARENTS("i2s3", audio_parents, &ccu_div_ops, - 0), + CLK_SET_RATE_PARENT), }, }; @@ -518,7 +518,7 @@ static struct ccu_div i2s0_clk = { .hw.init = CLK_HW_INIT_PARENTS("i2s0", audio_parents, &ccu_div_ops, - 0), + CLK_SET_RATE_PARENT), }, }; @@ -531,7 +531,7 @@ static struct ccu_div i2s1_clk = { .hw.init = CLK_HW_INIT_PARENTS("i2s1", audio_parents, &ccu_div_ops, - 0), + CLK_SET_RATE_PARENT), }, }; @@ 
-544,7 +544,7 @@ static struct ccu_div i2s2_clk = { .hw.init = CLK_HW_INIT_PARENTS("i2s2", audio_parents, &ccu_div_ops, - 0), + CLK_SET_RATE_PARENT), }, }; diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c index 9b3939fc7faa61..5c779eec454b6e 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c @@ -77,7 +77,7 @@ static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve", BIT(28), /* lock */ 0); -static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_ddr_clk, "pll-ddr", +static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_ddr0_clk, "pll-ddr0", "osc24M", 0x020, 8, 5, /* N */ 4, 2, /* K */ @@ -116,6 +116,14 @@ static SUNXI_CCU_NK_WITH_GATE_LOCK_POSTDIV(pll_periph1_clk, "pll-periph1", 2, /* post-div */ 0); +static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_ddr1_clk, "pll-ddr1", + "osc24M", 0x04c, + 8, 7, /* N */ + 0, 2, /* M */ + BIT(31), /* gate */ + BIT(28), /* lock */ + 0); + static const char * const cpu_parents[] = { "osc32k", "osc24M", "pll-cpu", "pll-cpu" }; static SUNXI_CCU_MUX(cpu_clk, "cpu", cpu_parents, @@ -227,6 +235,8 @@ static SUNXI_CCU_GATE(bus_codec_clk, "bus-codec", "apb1", 0x068, BIT(0), 0); static SUNXI_CCU_GATE(bus_pio_clk, "bus-pio", "apb1", 0x068, BIT(5), 0); +static SUNXI_CCU_GATE(bus_i2s0_clk, "bus-i2s0", "apb1", + 0x068, BIT(12), 0); static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb2", 0x06c, BIT(0), 0); @@ -298,12 +308,18 @@ static SUNXI_CCU_MP_WITH_MUX_GATE(spi0_clk, "spi0", mod0_default_parents, 0x0a0, BIT(31), /* gate */ 0); +static const char * const i2s_parents[] = { "pll-audio-8x", "pll-audio-4x", + "pll-audio-2x", "pll-audio" }; +static SUNXI_CCU_MUX_WITH_GATE(i2s0_clk, "i2s0", i2s_parents, + 0x0b0, 16, 2, BIT(31), CLK_SET_RATE_PARENT); + static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M", 0x0cc, BIT(8), 0); static SUNXI_CCU_GATE(usb_ohci0_clk, "usb-ohci0", "osc24M", 0x0cc, BIT(16), 0); -static const char * const dram_parents[] = { "pll-ddr", "pll-periph0-2x" }; +static const char * const dram_parents[] = { "pll-ddr0", "pll-ddr1", + "pll-periph0-2x" }; static SUNXI_CCU_M_WITH_MUX(dram_clk, "dram", dram_parents, 0x0f4, 0, 4, 20, 2, CLK_IS_CRITICAL); @@ -363,10 +379,11 @@ static struct ccu_common *sun8i_v3s_ccu_clks[] = { &pll_audio_base_clk.common, &pll_video_clk.common, &pll_ve_clk.common, - &pll_ddr_clk.common, + &pll_ddr0_clk.common, &pll_periph0_clk.common, &pll_isp_clk.common, &pll_periph1_clk.common, + &pll_ddr1_clk.common, &cpu_clk.common, &axi_clk.common, &ahb1_clk.common, @@ -433,6 +450,80 @@ static const struct clk_hw *clk_parent_pll_audio[] = { &pll_audio_base_clk.common.hw }; +static struct ccu_common *sun8i_v3_ccu_clks[] = { + &pll_cpu_clk.common, + &pll_audio_base_clk.common, + &pll_video_clk.common, + &pll_ve_clk.common, + &pll_ddr0_clk.common, + &pll_periph0_clk.common, + &pll_isp_clk.common, + &pll_periph1_clk.common, + &pll_ddr1_clk.common, + &cpu_clk.common, + &axi_clk.common, + &ahb1_clk.common, + &apb1_clk.common, + &apb2_clk.common, + &ahb2_clk.common, + &bus_ce_clk.common, + &bus_dma_clk.common, + &bus_mmc0_clk.common, + &bus_mmc1_clk.common, + &bus_mmc2_clk.common, + &bus_dram_clk.common, + &bus_emac_clk.common, + &bus_hstimer_clk.common, + &bus_spi0_clk.common, + &bus_otg_clk.common, + &bus_ehci0_clk.common, + &bus_ohci0_clk.common, + &bus_ve_clk.common, + &bus_tcon0_clk.common, + &bus_csi_clk.common, + &bus_de_clk.common, + &bus_codec_clk.common, + &bus_pio_clk.common, + &bus_i2s0_clk.common, + &bus_i2c0_clk.common, + &bus_i2c1_clk.common, + &bus_uart0_clk.common, + 
&bus_uart1_clk.common, + &bus_uart2_clk.common, + &bus_ephy_clk.common, + &bus_dbg_clk.common, + &mmc0_clk.common, + &mmc0_sample_clk.common, + &mmc0_output_clk.common, + &mmc1_clk.common, + &mmc1_sample_clk.common, + &mmc1_output_clk.common, + &mmc2_clk.common, + &mmc2_sample_clk.common, + &mmc2_output_clk.common, + &ce_clk.common, + &spi0_clk.common, + &i2s0_clk.common, + &usb_phy0_clk.common, + &usb_ohci0_clk.common, + &dram_clk.common, + &dram_ve_clk.common, + &dram_csi_clk.common, + &dram_ohci_clk.common, + &dram_ehci_clk.common, + &de_clk.common, + &tcon_clk.common, + &csi_misc_clk.common, + &csi0_mclk_clk.common, + &csi1_sclk_clk.common, + &csi1_mclk_clk.common, + &ve_clk.common, + &ac_dig_clk.common, + &avs_clk.common, + &mbus_clk.common, + &mipi_csi_clk.common, +}; + /* We hardcode the divider to 4 for now */ static CLK_FIXED_FACTOR_HWS(pll_audio_clk, "pll-audio", clk_parent_pll_audio, @@ -460,11 +551,12 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = { [CLK_PLL_AUDIO_8X] = &pll_audio_8x_clk.hw, [CLK_PLL_VIDEO] = &pll_video_clk.common.hw, [CLK_PLL_VE] = &pll_ve_clk.common.hw, - [CLK_PLL_DDR] = &pll_ddr_clk.common.hw, + [CLK_PLL_DDR0] = &pll_ddr0_clk.common.hw, [CLK_PLL_PERIPH0] = &pll_periph0_clk.common.hw, [CLK_PLL_PERIPH0_2X] = &pll_periph0_2x_clk.hw, [CLK_PLL_ISP] = &pll_isp_clk.common.hw, [CLK_PLL_PERIPH1] = &pll_periph1_clk.common.hw, + [CLK_PLL_DDR1] = &pll_ddr1_clk.common.hw, [CLK_CPU] = &cpu_clk.common.hw, [CLK_AXI] = &axi_clk.common.hw, [CLK_AHB1] = &ahb1_clk.common.hw, @@ -502,6 +594,9 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = { [CLK_MMC1] = &mmc1_clk.common.hw, [CLK_MMC1_SAMPLE] = &mmc1_sample_clk.common.hw, [CLK_MMC1_OUTPUT] = &mmc1_output_clk.common.hw, + [CLK_MMC2] = &mmc2_clk.common.hw, + [CLK_MMC2_SAMPLE] = &mmc2_sample_clk.common.hw, + [CLK_MMC2_OUTPUT] = &mmc2_output_clk.common.hw, [CLK_CE] = &ce_clk.common.hw, [CLK_SPI0] = &spi0_clk.common.hw, [CLK_USB_PHY0] = &usb_phy0_clk.common.hw, @@ -526,6 +621,88 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = { .num = CLK_NUMBER, }; +static struct clk_hw_onecell_data sun8i_v3_hw_clks = { + .hws = { + [CLK_PLL_CPU] = &pll_cpu_clk.common.hw, + [CLK_PLL_AUDIO_BASE] = &pll_audio_base_clk.common.hw, + [CLK_PLL_AUDIO] = &pll_audio_clk.hw, + [CLK_PLL_AUDIO_2X] = &pll_audio_2x_clk.hw, + [CLK_PLL_AUDIO_4X] = &pll_audio_4x_clk.hw, + [CLK_PLL_AUDIO_8X] = &pll_audio_8x_clk.hw, + [CLK_PLL_VIDEO] = &pll_video_clk.common.hw, + [CLK_PLL_VE] = &pll_ve_clk.common.hw, + [CLK_PLL_DDR0] = &pll_ddr0_clk.common.hw, + [CLK_PLL_PERIPH0] = &pll_periph0_clk.common.hw, + [CLK_PLL_PERIPH0_2X] = &pll_periph0_2x_clk.hw, + [CLK_PLL_ISP] = &pll_isp_clk.common.hw, + [CLK_PLL_PERIPH1] = &pll_periph1_clk.common.hw, + [CLK_PLL_DDR1] = &pll_ddr1_clk.common.hw, + [CLK_CPU] = &cpu_clk.common.hw, + [CLK_AXI] = &axi_clk.common.hw, + [CLK_AHB1] = &ahb1_clk.common.hw, + [CLK_APB1] = &apb1_clk.common.hw, + [CLK_APB2] = &apb2_clk.common.hw, + [CLK_AHB2] = &ahb2_clk.common.hw, + [CLK_BUS_CE] = &bus_ce_clk.common.hw, + [CLK_BUS_DMA] = &bus_dma_clk.common.hw, + [CLK_BUS_MMC0] = &bus_mmc0_clk.common.hw, + [CLK_BUS_MMC1] = &bus_mmc1_clk.common.hw, + [CLK_BUS_MMC2] = &bus_mmc2_clk.common.hw, + [CLK_BUS_DRAM] = &bus_dram_clk.common.hw, + [CLK_BUS_EMAC] = &bus_emac_clk.common.hw, + [CLK_BUS_HSTIMER] = &bus_hstimer_clk.common.hw, + [CLK_BUS_SPI0] = &bus_spi0_clk.common.hw, + [CLK_BUS_OTG] = &bus_otg_clk.common.hw, + [CLK_BUS_EHCI0] = &bus_ehci0_clk.common.hw, + [CLK_BUS_OHCI0] = &bus_ohci0_clk.common.hw, + [CLK_BUS_VE] = &bus_ve_clk.common.hw, 
+ [CLK_BUS_TCON0] = &bus_tcon0_clk.common.hw, + [CLK_BUS_CSI] = &bus_csi_clk.common.hw, + [CLK_BUS_DE] = &bus_de_clk.common.hw, + [CLK_BUS_CODEC] = &bus_codec_clk.common.hw, + [CLK_BUS_PIO] = &bus_pio_clk.common.hw, + [CLK_BUS_I2S0] = &bus_i2s0_clk.common.hw, + [CLK_BUS_I2C0] = &bus_i2c0_clk.common.hw, + [CLK_BUS_I2C1] = &bus_i2c1_clk.common.hw, + [CLK_BUS_UART0] = &bus_uart0_clk.common.hw, + [CLK_BUS_UART1] = &bus_uart1_clk.common.hw, + [CLK_BUS_UART2] = &bus_uart2_clk.common.hw, + [CLK_BUS_EPHY] = &bus_ephy_clk.common.hw, + [CLK_BUS_DBG] = &bus_dbg_clk.common.hw, + [CLK_MMC0] = &mmc0_clk.common.hw, + [CLK_MMC0_SAMPLE] = &mmc0_sample_clk.common.hw, + [CLK_MMC0_OUTPUT] = &mmc0_output_clk.common.hw, + [CLK_MMC1] = &mmc1_clk.common.hw, + [CLK_MMC1_SAMPLE] = &mmc1_sample_clk.common.hw, + [CLK_MMC1_OUTPUT] = &mmc1_output_clk.common.hw, + [CLK_MMC2] = &mmc2_clk.common.hw, + [CLK_MMC2_SAMPLE] = &mmc2_sample_clk.common.hw, + [CLK_MMC2_OUTPUT] = &mmc2_output_clk.common.hw, + [CLK_CE] = &ce_clk.common.hw, + [CLK_SPI0] = &spi0_clk.common.hw, + [CLK_I2S0] = &i2s0_clk.common.hw, + [CLK_USB_PHY0] = &usb_phy0_clk.common.hw, + [CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw, + [CLK_DRAM] = &dram_clk.common.hw, + [CLK_DRAM_VE] = &dram_ve_clk.common.hw, + [CLK_DRAM_CSI] = &dram_csi_clk.common.hw, + [CLK_DRAM_EHCI] = &dram_ehci_clk.common.hw, + [CLK_DRAM_OHCI] = &dram_ohci_clk.common.hw, + [CLK_DE] = &de_clk.common.hw, + [CLK_TCON0] = &tcon_clk.common.hw, + [CLK_CSI_MISC] = &csi_misc_clk.common.hw, + [CLK_CSI0_MCLK] = &csi0_mclk_clk.common.hw, + [CLK_CSI1_SCLK] = &csi1_sclk_clk.common.hw, + [CLK_CSI1_MCLK] = &csi1_mclk_clk.common.hw, + [CLK_VE] = &ve_clk.common.hw, + [CLK_AC_DIG] = &ac_dig_clk.common.hw, + [CLK_AVS] = &avs_clk.common.hw, + [CLK_MBUS] = &mbus_clk.common.hw, + [CLK_MIPI_CSI] = &mipi_csi_clk.common.hw, + }, + .num = CLK_NUMBER, +}; + static struct ccu_reset_map sun8i_v3s_ccu_resets[] = { [RST_USB_PHY0] = { 0x0cc, BIT(0) }, @@ -561,6 +738,42 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = { [RST_BUS_UART2] = { 0x2d8, BIT(18) }, }; +static struct ccu_reset_map sun8i_v3_ccu_resets[] = { + [RST_USB_PHY0] = { 0x0cc, BIT(0) }, + + [RST_MBUS] = { 0x0fc, BIT(31) }, + + [RST_BUS_CE] = { 0x2c0, BIT(5) }, + [RST_BUS_DMA] = { 0x2c0, BIT(6) }, + [RST_BUS_MMC0] = { 0x2c0, BIT(8) }, + [RST_BUS_MMC1] = { 0x2c0, BIT(9) }, + [RST_BUS_MMC2] = { 0x2c0, BIT(10) }, + [RST_BUS_DRAM] = { 0x2c0, BIT(14) }, + [RST_BUS_EMAC] = { 0x2c0, BIT(17) }, + [RST_BUS_HSTIMER] = { 0x2c0, BIT(19) }, + [RST_BUS_SPI0] = { 0x2c0, BIT(20) }, + [RST_BUS_OTG] = { 0x2c0, BIT(24) }, + [RST_BUS_EHCI0] = { 0x2c0, BIT(26) }, + [RST_BUS_OHCI0] = { 0x2c0, BIT(29) }, + + [RST_BUS_VE] = { 0x2c4, BIT(0) }, + [RST_BUS_TCON0] = { 0x2c4, BIT(4) }, + [RST_BUS_CSI] = { 0x2c4, BIT(8) }, + [RST_BUS_DE] = { 0x2c4, BIT(12) }, + [RST_BUS_DBG] = { 0x2c4, BIT(31) }, + + [RST_BUS_EPHY] = { 0x2c8, BIT(2) }, + + [RST_BUS_CODEC] = { 0x2d0, BIT(0) }, + [RST_BUS_I2S0] = { 0x2d0, BIT(12) }, + + [RST_BUS_I2C0] = { 0x2d8, BIT(0) }, + [RST_BUS_I2C1] = { 0x2d8, BIT(1) }, + [RST_BUS_UART0] = { 0x2d8, BIT(16) }, + [RST_BUS_UART1] = { 0x2d8, BIT(17) }, + [RST_BUS_UART2] = { 0x2d8, BIT(18) }, +}; + static const struct sunxi_ccu_desc sun8i_v3s_ccu_desc = { .ccu_clks = sun8i_v3s_ccu_clks, .num_ccu_clks = ARRAY_SIZE(sun8i_v3s_ccu_clks), @@ -571,7 +784,18 @@ static const struct sunxi_ccu_desc sun8i_v3s_ccu_desc = { .num_resets = ARRAY_SIZE(sun8i_v3s_ccu_resets), }; -static void __init sun8i_v3s_ccu_setup(struct device_node *node) +static const struct sunxi_ccu_desc 
sun8i_v3_ccu_desc = { + .ccu_clks = sun8i_v3_ccu_clks, + .num_ccu_clks = ARRAY_SIZE(sun8i_v3_ccu_clks), + + .hw_clks = &sun8i_v3_hw_clks, + + .resets = sun8i_v3_ccu_resets, + .num_resets = ARRAY_SIZE(sun8i_v3_ccu_resets), +}; + +static void __init sun8i_v3_v3s_ccu_init(struct device_node *node, + const struct sunxi_ccu_desc *ccu_desc) { void __iomem *reg; u32 val; @@ -587,7 +811,21 @@ static void __init sun8i_v3s_ccu_setup(struct device_node *node) val &= ~GENMASK(19, 16); writel(val | (3 << 16), reg + SUN8I_V3S_PLL_AUDIO_REG); - sunxi_ccu_probe(node, reg, &sun8i_v3s_ccu_desc); + sunxi_ccu_probe(node, reg, ccu_desc); +} + +static void __init sun8i_v3s_ccu_setup(struct device_node *node) +{ + sun8i_v3_v3s_ccu_init(node, &sun8i_v3s_ccu_desc); +} + +static void __init sun8i_v3_ccu_setup(struct device_node *node) +{ + sun8i_v3_v3s_ccu_init(node, &sun8i_v3_ccu_desc); } + CLK_OF_DECLARE(sun8i_v3s_ccu, "allwinner,sun8i-v3s-ccu", sun8i_v3s_ccu_setup); + +CLK_OF_DECLARE(sun8i_v3_ccu, "allwinner,sun8i-v3-ccu", + sun8i_v3_ccu_setup); diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h index fbc1da8b452050..b0160d305a6775 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h @@ -20,7 +20,7 @@ #define CLK_PLL_AUDIO_8X 5 #define CLK_PLL_VIDEO 6 #define CLK_PLL_VE 7 -#define CLK_PLL_DDR 8 +#define CLK_PLL_DDR0 8 #define CLK_PLL_PERIPH0 9 #define CLK_PLL_PERIPH0_2X 10 #define CLK_PLL_ISP 11 @@ -49,6 +49,8 @@ /* And the GPU module clock is exported */ -#define CLK_NUMBER (CLK_MIPI_CSI + 1) +#define CLK_PLL_DDR1 74 + +#define CLK_NUMBER (CLK_I2S0 + 1) #endif /* _CCU_SUN8I_H3_H_ */ diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c index 7fe3ac980e5f93..2e20e650b6c01b 100644 --- a/drivers/clk/sunxi-ng/ccu_common.c +++ b/drivers/clk/sunxi-ng/ccu_common.c @@ -97,14 +97,15 @@ int sunxi_ccu_probe(struct device_node *node, void __iomem *reg, for (i = 0; i < desc->hw_clks->num ; i++) { struct clk_hw *hw = desc->hw_clks->hws[i]; + const char *name; if (!hw) continue; + name = hw->init->name; ret = of_clk_hw_register(node, hw); if (ret) { - pr_err("Couldn't register clock %d - %s\n", - i, clk_hw_get_name(hw)); + pr_err("Couldn't register clock %d - %s\n", i, name); goto err_clk_unreg; } } diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c index 015a657d33829e..ac5bc8857a5145 100644 --- a/drivers/clk/ti/apll.c +++ b/drivers/clk/ti/apll.c @@ -140,6 +140,7 @@ static void __init omap_clk_register_apll(void *user, struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw); struct dpll_data *ad = clk_hw->dpll_data; struct clk *clk; + const struct clk_init_data *init = clk_hw->hw.init; clk = of_clk_get(node, 0); if (IS_ERR(clk)) { @@ -168,15 +169,15 @@ static void __init omap_clk_register_apll(void *user, clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, node->name); if (!IS_ERR(clk)) { of_clk_add_provider(node, of_clk_src_simple_get, clk); - kfree(clk_hw->hw.init->parent_names); - kfree(clk_hw->hw.init); + kfree(init->parent_names); + kfree(init); return; } cleanup: kfree(clk_hw->dpll_data); - kfree(clk_hw->hw.init->parent_names); - kfree(clk_hw->hw.init); + kfree(init->parent_names); + kfree(init); kfree(clk_hw); } diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c index dafef7e70ba834..e675e27f1203cb 100644 --- a/drivers/clk/ti/clk-54xx.c +++ b/drivers/clk/ti/clk-54xx.c @@ -314,6 +314,39 @@ static const struct omap_clkctrl_reg_data omap5_dss_clkctrl_regs[] __initconst = { 0 }, }; +static const char * 
const omap5_gpu_core_mux_parents[] __initconst = { + "dpll_core_h14x2_ck", + "dpll_per_h14x2_ck", + NULL, +}; + +static const char * const omap5_gpu_hyd_mux_parents[] __initconst = { + "dpll_core_h14x2_ck", + "dpll_per_h14x2_ck", + NULL, +}; + +static const char * const omap5_gpu_sys_clk_parents[] __initconst = { + "sys_clkin", + NULL, +}; + +static const struct omap_clkctrl_div_data omap5_gpu_sys_clk_data __initconst = { + .max_div = 2, +}; + +static const struct omap_clkctrl_bit_data omap5_gpu_core_bit_data[] __initconst = { + { 24, TI_CLK_MUX, omap5_gpu_core_mux_parents, NULL }, + { 25, TI_CLK_MUX, omap5_gpu_hyd_mux_parents, NULL }, + { 26, TI_CLK_DIVIDER, omap5_gpu_sys_clk_parents, &omap5_gpu_sys_clk_data }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data omap5_gpu_clkctrl_regs[] __initconst = { + { OMAP5_GPU_CLKCTRL, omap5_gpu_core_bit_data, CLKF_SW_SUP, "gpu_cm:clk:0000:24" }, + { 0 }, +}; + static const char * const omap5_mmc1_fclk_mux_parents[] __initconst = { "func_128m_clk", "dpll_per_m2x2_ck", @@ -470,6 +503,7 @@ const struct omap_clkctrl_data omap5_clkctrl_data[] __initconst = { { 0x4a008e20, omap5_l3instr_clkctrl_regs }, { 0x4a009020, omap5_l4per_clkctrl_regs }, { 0x4a009420, omap5_dss_clkctrl_regs }, + { 0x4a009520, omap5_gpu_clkctrl_regs }, { 0x4a009620, omap5_l3init_clkctrl_regs }, { 0x4ae07920, omap5_wkupaon_clkctrl_regs }, { 0 }, diff --git a/drivers/clk/ti/clk-814x.c b/drivers/clk/ti/clk-814x.c index e8cee6f3b4a033..087cfa75ac2484 100644 --- a/drivers/clk/ti/clk-814x.c +++ b/drivers/clk/ti/clk-814x.c @@ -66,6 +66,7 @@ static int __init dm814x_adpll_early_init(void) } of_platform_populate(np, NULL, NULL, NULL); + of_node_put(np); return 0; } diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c index 659dadb23279ca..247510e306e2a8 100644 --- a/drivers/clk/ti/dpll.c +++ b/drivers/clk/ti/dpll.c @@ -165,6 +165,7 @@ static void __init _register_dpll(void *user, struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw); struct dpll_data *dd = clk_hw->dpll_data; struct clk *clk; + const struct clk_init_data *init = hw->init; clk = of_clk_get(node, 0); if (IS_ERR(clk)) { @@ -196,15 +197,15 @@ static void __init _register_dpll(void *user, if (!IS_ERR(clk)) { of_clk_add_provider(node, of_clk_src_simple_get, clk); - kfree(clk_hw->hw.init->parent_names); - kfree(clk_hw->hw.init); + kfree(init->parent_names); + kfree(init); return; } cleanup: kfree(clk_hw->dpll_data); - kfree(clk_hw->hw.init->parent_names); - kfree(clk_hw->hw.init); + kfree(init->parent_names); + kfree(init); kfree(clk_hw); } @@ -291,14 +292,12 @@ static void __init of_ti_dpll_setup(struct device_node *node, struct dpll_data *dd = NULL; u8 dpll_mode = 0; - dd = kzalloc(sizeof(*dd), GFP_KERNEL); + dd = kmemdup(ddt, sizeof(*dd), GFP_KERNEL); clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL); init = kzalloc(sizeof(*init), GFP_KERNEL); if (!dd || !clk_hw || !init) goto cleanup; - memcpy(dd, ddt, sizeof(*dd)); - clk_hw->dpll_data = dd; clk_hw->ops = &clkhwops_omap3_dpll; clk_hw->hw.init = init; diff --git a/drivers/clk/versatile/clk-versatile.c b/drivers/clk/versatile/clk-versatile.c index 90bb0b041b7a9d..fd54d5c0251cc7 100644 --- a/drivers/clk/versatile/clk-versatile.c +++ b/drivers/clk/versatile/clk-versatile.c @@ -70,6 +70,7 @@ static void __init cm_osc_setup(struct device_node *np, return; } cm_base = of_iomap(parent, 0); + of_node_put(parent); if (!cm_base) { pr_err("could not remap core module base\n"); return; diff --git a/drivers/clk/zte/clk-zx296718.c b/drivers/clk/zte/clk-zx296718.c index 
fd6c347bec6a74..dd7045bc48c152 100644 --- a/drivers/clk/zte/clk-zx296718.c +++ b/drivers/clk/zte/clk-zx296718.c @@ -564,6 +564,7 @@ static int __init top_clocks_init(struct device_node *np) { void __iomem *reg_base; int i, ret; + const char *name; reg_base = of_iomap(np, 0); if (!reg_base) { @@ -573,11 +574,10 @@ static int __init top_clocks_init(struct device_node *np) for (i = 0; i < ARRAY_SIZE(zx296718_pll_clk); i++) { zx296718_pll_clk[i].reg_base += (uintptr_t)reg_base; + name = zx296718_pll_clk[i].hw.init->name; ret = clk_hw_register(NULL, &zx296718_pll_clk[i].hw); - if (ret) { - pr_warn("top clk %s init error!\n", - zx296718_pll_clk[i].hw.init->name); - } + if (ret) + pr_warn("top clk %s init error!\n", name); } for (i = 0; i < ARRAY_SIZE(top_ffactor_clk); i++) { @@ -585,11 +585,10 @@ static int __init top_clocks_init(struct device_node *np) top_hw_onecell_data.hws[top_ffactor_clk[i].id] = &top_ffactor_clk[i].factor.hw; + name = top_ffactor_clk[i].factor.hw.init->name; ret = clk_hw_register(NULL, &top_ffactor_clk[i].factor.hw); - if (ret) { - pr_warn("top clk %s init error!\n", - top_ffactor_clk[i].factor.hw.init->name); - } + if (ret) + pr_warn("top clk %s init error!\n", name); } for (i = 0; i < ARRAY_SIZE(top_mux_clk); i++) { @@ -598,11 +597,10 @@ static int __init top_clocks_init(struct device_node *np) &top_mux_clk[i].mux.hw; top_mux_clk[i].mux.reg += (uintptr_t)reg_base; + name = top_mux_clk[i].mux.hw.init->name; ret = clk_hw_register(NULL, &top_mux_clk[i].mux.hw); - if (ret) { - pr_warn("top clk %s init error!\n", - top_mux_clk[i].mux.hw.init->name); - } + if (ret) + pr_warn("top clk %s init error!\n", name); } for (i = 0; i < ARRAY_SIZE(top_gate_clk); i++) { @@ -611,11 +609,10 @@ static int __init top_clocks_init(struct device_node *np) &top_gate_clk[i].gate.hw; top_gate_clk[i].gate.reg += (uintptr_t)reg_base; + name = top_gate_clk[i].gate.hw.init->name; ret = clk_hw_register(NULL, &top_gate_clk[i].gate.hw); - if (ret) { - pr_warn("top clk %s init error!\n", - top_gate_clk[i].gate.hw.init->name); - } + if (ret) + pr_warn("top clk %s init error!\n", name); } for (i = 0; i < ARRAY_SIZE(top_div_clk); i++) { @@ -624,11 +621,10 @@ static int __init top_clocks_init(struct device_node *np) &top_div_clk[i].div.hw; top_div_clk[i].div.reg += (uintptr_t)reg_base; + name = top_div_clk[i].div.hw.init->name; ret = clk_hw_register(NULL, &top_div_clk[i].div.hw); - if (ret) { - pr_warn("top clk %s init error!\n", - top_div_clk[i].div.hw.init->name); - } + if (ret) + pr_warn("top clk %s init error!\n", name); } ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, @@ -754,6 +750,7 @@ static int __init lsp0_clocks_init(struct device_node *np) { void __iomem *reg_base; int i, ret; + const char *name; reg_base = of_iomap(np, 0); if (!reg_base) { @@ -767,11 +764,10 @@ static int __init lsp0_clocks_init(struct device_node *np) &lsp0_mux_clk[i].mux.hw; lsp0_mux_clk[i].mux.reg += (uintptr_t)reg_base; + name = lsp0_mux_clk[i].mux.hw.init->name; ret = clk_hw_register(NULL, &lsp0_mux_clk[i].mux.hw); - if (ret) { - pr_warn("lsp0 clk %s init error!\n", - lsp0_mux_clk[i].mux.hw.init->name); - } + if (ret) + pr_warn("lsp0 clk %s init error!\n", name); } for (i = 0; i < ARRAY_SIZE(lsp0_gate_clk); i++) { @@ -780,11 +776,10 @@ static int __init lsp0_clocks_init(struct device_node *np) &lsp0_gate_clk[i].gate.hw; lsp0_gate_clk[i].gate.reg += (uintptr_t)reg_base; + name = lsp0_gate_clk[i].gate.hw.init->name; ret = clk_hw_register(NULL, &lsp0_gate_clk[i].gate.hw); - if (ret) { - pr_warn("lsp0 clk %s init 
error!\n", - lsp0_gate_clk[i].gate.hw.init->name); - } + if (ret) + pr_warn("lsp0 clk %s init error!\n", name); } for (i = 0; i < ARRAY_SIZE(lsp0_div_clk); i++) { @@ -793,11 +788,10 @@ static int __init lsp0_clocks_init(struct device_node *np) &lsp0_div_clk[i].div.hw; lsp0_div_clk[i].div.reg += (uintptr_t)reg_base; + name = lsp0_div_clk[i].div.hw.init->name; ret = clk_hw_register(NULL, &lsp0_div_clk[i].div.hw); - if (ret) { - pr_warn("lsp0 clk %s init error!\n", - lsp0_div_clk[i].div.hw.init->name); - } + if (ret) + pr_warn("lsp0 clk %s init error!\n", name); } ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, @@ -862,6 +856,7 @@ static int __init lsp1_clocks_init(struct device_node *np) { void __iomem *reg_base; int i, ret; + const char *name; reg_base = of_iomap(np, 0); if (!reg_base) { @@ -875,11 +870,10 @@ static int __init lsp1_clocks_init(struct device_node *np) &lsp0_mux_clk[i].mux.hw; lsp1_mux_clk[i].mux.reg += (uintptr_t)reg_base; + name = lsp1_mux_clk[i].mux.hw.init->name; ret = clk_hw_register(NULL, &lsp1_mux_clk[i].mux.hw); - if (ret) { - pr_warn("lsp1 clk %s init error!\n", - lsp1_mux_clk[i].mux.hw.init->name); - } + if (ret) + pr_warn("lsp1 clk %s init error!\n", name); } for (i = 0; i < ARRAY_SIZE(lsp1_gate_clk); i++) { @@ -888,11 +882,10 @@ static int __init lsp1_clocks_init(struct device_node *np) &lsp1_gate_clk[i].gate.hw; lsp1_gate_clk[i].gate.reg += (uintptr_t)reg_base; + name = lsp1_gate_clk[i].gate.hw.init->name; ret = clk_hw_register(NULL, &lsp1_gate_clk[i].gate.hw); - if (ret) { - pr_warn("lsp1 clk %s init error!\n", - lsp1_gate_clk[i].gate.hw.init->name); - } + if (ret) + pr_warn("lsp1 clk %s init error!\n", name); } for (i = 0; i < ARRAY_SIZE(lsp1_div_clk); i++) { @@ -901,11 +894,10 @@ static int __init lsp1_clocks_init(struct device_node *np) &lsp1_div_clk[i].div.hw; lsp1_div_clk[i].div.reg += (uintptr_t)reg_base; + name = lsp1_div_clk[i].div.hw.init->name; ret = clk_hw_register(NULL, &lsp1_div_clk[i].div.hw); - if (ret) { - pr_warn("lsp1 clk %s init error!\n", - lsp1_div_clk[i].div.hw.init->name); - } + if (ret) + pr_warn("lsp1 clk %s init error!\n", name); } ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, @@ -979,6 +971,7 @@ static int __init audio_clocks_init(struct device_node *np) { void __iomem *reg_base; int i, ret; + const char *name; reg_base = of_iomap(np, 0); if (!reg_base) { @@ -992,11 +985,10 @@ static int __init audio_clocks_init(struct device_node *np) &audio_mux_clk[i].mux.hw; audio_mux_clk[i].mux.reg += (uintptr_t)reg_base; + name = audio_mux_clk[i].mux.hw.init->name; ret = clk_hw_register(NULL, &audio_mux_clk[i].mux.hw); - if (ret) { - pr_warn("audio clk %s init error!\n", - audio_mux_clk[i].mux.hw.init->name); - } + if (ret) + pr_warn("audio clk %s init error!\n", name); } for (i = 0; i < ARRAY_SIZE(audio_adiv_clk); i++) { @@ -1005,11 +997,10 @@ static int __init audio_clocks_init(struct device_node *np) &audio_adiv_clk[i].hw; audio_adiv_clk[i].reg_base += (uintptr_t)reg_base; + name = audio_adiv_clk[i].hw.init->name; ret = clk_hw_register(NULL, &audio_adiv_clk[i].hw); - if (ret) { - pr_warn("audio clk %s init error!\n", - audio_adiv_clk[i].hw.init->name); - } + if (ret) + pr_warn("audio clk %s init error!\n", name); } for (i = 0; i < ARRAY_SIZE(audio_div_clk); i++) { @@ -1018,11 +1009,10 @@ static int __init audio_clocks_init(struct device_node *np) &audio_div_clk[i].div.hw; audio_div_clk[i].div.reg += (uintptr_t)reg_base; + name = audio_div_clk[i].div.hw.init->name; ret = clk_hw_register(NULL, &audio_div_clk[i].div.hw); - if 
(ret) { - pr_warn("audio clk %s init error!\n", - audio_div_clk[i].div.hw.init->name); - } + if (ret) + pr_warn("audio clk %s init error!\n", name); } for (i = 0; i < ARRAY_SIZE(audio_gate_clk); i++) { @@ -1031,11 +1021,10 @@ static int __init audio_clocks_init(struct device_node *np) &audio_gate_clk[i].gate.hw; audio_gate_clk[i].gate.reg += (uintptr_t)reg_base; + name = audio_gate_clk[i].gate.hw.init->name; ret = clk_hw_register(NULL, &audio_gate_clk[i].gate.hw); - if (ret) { - pr_warn("audio clk %s init error!\n", - audio_gate_clk[i].gate.hw.init->name); - } + if (ret) + pr_warn("audio clk %s init error!\n", name); } ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile index 16614d73a5fcf6..985633c08a2672 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile +++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile @@ -36,9 +36,9 @@ ifdef CONFIG_CC_IS_CLANG calcs_ccflags += -msse2 endif -CFLAGS_dcn_calcs.o := $(calcs_ccflags) -CFLAGS_dcn_calc_auto.o := $(calcs_ccflags) -CFLAGS_dcn_calc_math.o := $(calcs_ccflags) -Wno-tautological-compare +CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calcs.o := $(calcs_ccflags) +CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calc_auto.o := $(calcs_ccflags) +CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calc_math.o := $(calcs_ccflags) -Wno-tautological-compare BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile index f57a3b281408cf..ddb8d5649e7918 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile @@ -16,10 +16,10 @@ else ifneq ($(call cc-option, -mstack-alignment=16),) cc_stack_align := -mstack-alignment=16 endif -CFLAGS_dcn20_resource.o := -mhard-float -msse $(cc_stack_align) +CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse $(cc_stack_align) ifdef CONFIG_CC_IS_CLANG -CFLAGS_dcn20_resource.o += -msse2 +CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -msse2 endif AMD_DAL_DCN20 = $(addprefix $(AMDDALPATH)/dc/dcn20/,$(DCN20)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile index b2b39090fb5722..8cd9de8b1a7a19 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile @@ -3,7 +3,7 @@ DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o -CFLAGS_dcn21_resource.o := -mhard-float -msse -mpreferred-stack-boundary=4 +CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse -mpreferred-stack-boundary=4 AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21)) diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index af2a864a6da05c..5b2a65b424036e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -36,25 +36,22 @@ ifdef CONFIG_CC_IS_CLANG dml_ccflags += -msse2 endif -CFLAGS_display_mode_lib.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags) ifdef CONFIG_DRM_AMD_DC_DCN2_0 -CFLAGS_display_mode_vba.o := $(dml_ccflags) -CFLAGS_display_mode_vba_20.o := $(dml_ccflags) -CFLAGS_display_rq_dlg_calc_20.o := $(dml_ccflags) -CFLAGS_display_mode_vba_20v2.o := $(dml_ccflags) -CFLAGS_display_rq_dlg_calc_20v2.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := 
$(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags) endif ifdef CONFIG_DRM_AMD_DC_DCN2_1 -CFLAGS_display_mode_vba_21.o := $(dml_ccflags) -CFLAGS_display_rq_dlg_calc_21.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags) endif -ifdef CONFIG_DRM_AMD_DCN3AG -CFLAGS_display_mode_vba_3ag.o := $(dml_ccflags) -endif -CFLAGS_dml1_display_rq_dlg_calc.o := $(dml_ccflags) -CFLAGS_display_rq_dlg_helpers.o := $(dml_ccflags) -CFLAGS_dml_common_defs.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dml_common_defs.o := $(dml_ccflags) DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \ dml_common_defs.o diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile index 17db603f2d1f1d..b456cd23c6fa46 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile @@ -13,10 +13,9 @@ ifdef CONFIG_CC_IS_CLANG dsc_ccflags += -msse2 endif -CFLAGS_rc_calc.o := $(dsc_ccflags) -CFLAGS_rc_calc_dpi.o := $(dsc_ccflags) -CFLAGS_codec_main_amd.o := $(dsc_ccflags) -CFLAGS_dc_dsc.o := $(dsc_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc_dpi.o := $(dsc_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dsc/dc_dsc.o := $(dsc_ccflags) DSC = dc_dsc.o rc_calc.o rc_calc_dpi.o diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 658b930d34a81a..2587ea834f067f 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -26,7 +26,7 @@ subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror # Fine grained warnings disable CFLAGS_i915_pci.o = $(call cc-disable-warning, override-init) -CFLAGS_intel_fbdev.o = $(call cc-disable-warning, override-init) +CFLAGS_display/intel_fbdev.o = $(call cc-disable-warning, override-init) subdir-ccflags-y += \ $(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA) diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c index 482a2c1b340a03..43b312d06e3efc 100644 --- a/drivers/misc/cxl/main.c +++ b/drivers/misc/cxl/main.c @@ -18,6 +18,7 @@ #include #include +#include #include #include "cxl.h" @@ -315,6 +316,9 @@ static int __init init_cxl(void) { int rc = 0; + if (!tlbie_capable) + return -EINVAL; + if ((rc = cxl_file_init())) return rc; diff --git a/drivers/misc/ocxl/main.c b/drivers/misc/ocxl/main.c index 7210d9e059beed..ef73cf35dda2b3 100644 --- a/drivers/misc/ocxl/main.c +++ b/drivers/misc/ocxl/main.c @@ -2,12 +2,16 @@ // Copyright 2017 IBM Corp. 
#include #include +#include #include "ocxl_internal.h" static int __init init_ocxl(void) { int rc = 0; + if (!tlbie_capable) + return -EINVAL; + rc = ocxl_file_init(); if (rc) return rc; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/Makefile b/drivers/net/ethernet/aquantia/atlantic/hw_atl/Makefile deleted file mode 100644 index 805fa28f391a0c..00000000000000 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# kbuild requires Makefile in a directory to build individual objects diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/accel/Makefile deleted file mode 100644 index c78512eed8d73a..00000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -subdir-ccflags-y += -I$(src)/.. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/diag/Makefile deleted file mode 100644 index c78512eed8d73a..00000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -subdir-ccflags-y += -I$(src)/.. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/en/Makefile deleted file mode 100644 index c78512eed8d73a..00000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -subdir-ccflags-y += -I$(src)/.. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/Makefile deleted file mode 100644 index 5ee42991900ae1..00000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/Makefile +++ /dev/null @@ -1 +0,0 @@ -subdir-ccflags-y += -I$(src)/../.. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/Makefile deleted file mode 100644 index c78512eed8d73a..00000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -subdir-ccflags-y += -I$(src)/.. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/fpga/Makefile deleted file mode 100644 index c78512eed8d73a..00000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -subdir-ccflags-y += -I$(src)/.. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/Makefile deleted file mode 100644 index c78512eed8d73a..00000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -subdir-ccflags-y += -I$(src)/.. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/lib/Makefile deleted file mode 100644 index c78512eed8d73a..00000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -subdir-ccflags-y += -I$(src)/.. 
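The subdirectory Makefiles deleted around this point follow a single pattern: the aquantia hw_atl one above and the netronome nfp ones just below contained nothing but the comment that kbuild required a Makefile in a directory to build individual objects, and the mlx5 ones above only appended an include-path tweak to subdir-ccflags-y. Their removal pairs with the per-file flag renames earlier in the diff (CFLAGS_$(AMDDALPATH)/dc/... in amd/display, CFLAGS_display/intel_fbdev.o in i915), which appear to track a kbuild change that keys *FLAGS_<object>.o on the path relative to the Makefile's $(obj) instead of on the bare file name. A minimal kbuild sketch of that convention, using made-up names (CONFIG_FOO, foo.o and a hw/ subdirectory, none of them from this tree):

obj-$(CONFIG_FOO) += foo.o

# Composite object: files in a subdirectory are listed by path, with no
# placeholder hw/Makefile required for them to be built.
foo-y := core.o hw/init.o hw/regs.o

# Per-file flags are keyed on the object path relative to this Makefile's
# $(obj), mirroring the CFLAGS_$(AMDDALPATH)/... and
# CFLAGS_display/intel_fbdev.o hunks above.
CFLAGS_hw/init.o += -DFOO_DEBUG

With both objects and their flags addressed by path from the parent Makefile, a subdirectory no longer needs its own Makefile just so its objects can be built or given flags.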
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/Makefile b/drivers/net/ethernet/netronome/nfp/bpf/Makefile deleted file mode 100644 index 805fa28f391a0c..00000000000000 --- a/drivers/net/ethernet/netronome/nfp/bpf/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# kbuild requires Makefile in a directory to build individual objects diff --git a/drivers/net/ethernet/netronome/nfp/flower/Makefile b/drivers/net/ethernet/netronome/nfp/flower/Makefile deleted file mode 100644 index 805fa28f391a0c..00000000000000 --- a/drivers/net/ethernet/netronome/nfp/flower/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# kbuild requires Makefile in a directory to build individual objects diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/Makefile b/drivers/net/ethernet/netronome/nfp/nfpcore/Makefile deleted file mode 100644 index 805fa28f391a0c..00000000000000 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# kbuild requires Makefile in a directory to build individual objects diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/Makefile b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/Makefile deleted file mode 100644 index 805fa28f391a0c..00000000000000 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# kbuild requires Makefile in a directory to build individual objects diff --git a/drivers/net/ethernet/netronome/nfp/nic/Makefile b/drivers/net/ethernet/netronome/nfp/nic/Makefile deleted file mode 100644 index 805fa28f391a0c..00000000000000 --- a/drivers/net/ethernet/netronome/nfp/nic/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# kbuild requires Makefile in a directory to build individual objects diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c index 6758fd7c382e35..d7b2b47bc33ebb 100644 --- a/drivers/pci/hotplug/pnv_php.c +++ b/drivers/pci/hotplug/pnv_php.c @@ -419,9 +419,21 @@ static int pnv_php_get_attention_state(struct hotplug_slot *slot, u8 *state) static int pnv_php_set_attention_state(struct hotplug_slot *slot, u8 state) { struct pnv_php_slot *php_slot = to_pnv_php_slot(slot); + struct pci_dev *bridge = php_slot->pdev; + u16 new, mask; - /* FIXME: Make it real once firmware supports it */ php_slot->attention_state = state; + if (!bridge) + return 0; + + mask = PCI_EXP_SLTCTL_AIC; + + if (state) + new = PCI_EXP_SLTCTL_ATTN_IND_ON; + else + new = PCI_EXP_SLTCTL_ATTN_IND_OFF; + + pcie_capability_clear_and_set_word(bridge, PCI_EXP_SLTCTL, mask, new); return 0; } @@ -511,6 +523,37 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan) return 0; } +static int pnv_php_reset_slot(struct hotplug_slot *slot, int probe) +{ + struct pnv_php_slot *php_slot = to_pnv_php_slot(slot); + struct pci_dev *bridge = php_slot->pdev; + uint16_t sts; + + /* + * The CAPI folks want pnv_php to drive OpenCAPI slots + * which don't have a bridge. Only claim to support + * reset_slot() if we have a bridge device (for now...) 
+ */ + if (probe) + return !bridge; + + /* mask our interrupt while resetting the bridge */ + if (php_slot->irq > 0) + disable_irq(php_slot->irq); + + pci_bridge_secondary_bus_reset(bridge); + + /* clear any state changes that happened due to the reset */ + pcie_capability_read_word(php_slot->pdev, PCI_EXP_SLTSTA, &sts); + sts &= (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC); + pcie_capability_write_word(php_slot->pdev, PCI_EXP_SLTSTA, sts); + + if (php_slot->irq > 0) + enable_irq(php_slot->irq); + + return 0; +} + static int pnv_php_enable_slot(struct hotplug_slot *slot) { struct pnv_php_slot *php_slot = to_pnv_php_slot(slot); @@ -548,6 +591,7 @@ static const struct hotplug_slot_ops php_slot_ops = { .set_attention_status = pnv_php_set_attention_state, .enable_slot = pnv_php_enable_slot, .disable_slot = pnv_php_disable_slot, + .reset_slot = pnv_php_reset_slot, }; static void pnv_php_release(struct pnv_php_slot *php_slot) @@ -721,6 +765,12 @@ static irqreturn_t pnv_php_interrupt(int irq, void *data) pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &sts); sts &= (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC); pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, sts); + + pci_dbg(pdev, "PCI slot [%s]: HP int! DLAct: %d, PresDet: %d\n", + php_slot->name, + !!(sts & PCI_EXP_SLTSTA_DLLSC), + !!(sts & PCI_EXP_SLTSTA_PDC)); + if (sts & PCI_EXP_SLTSTA_DLLSC) { pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lsts); added = !!(lsts & PCI_EXP_LNKSTA_DLLLA); @@ -735,6 +785,7 @@ static irqreturn_t pnv_php_interrupt(int irq, void *data) added = !!(presence == OPAL_PCI_SLOT_PRESENT); } else { + pci_dbg(pdev, "PCI slot [%s]: Spurious IRQ?\n", php_slot->name); return IRQ_NONE; } @@ -955,6 +1006,9 @@ static int __init pnv_php_init(void) for_each_compatible_node(dn, NULL, "ibm,ioda2-phb") pnv_php_register(dn); + for_each_compatible_node(dn, NULL, "ibm,ioda3-phb") + pnv_php_register(dn); + return 0; } @@ -964,6 +1018,9 @@ static void __exit pnv_php_exit(void) for_each_compatible_node(dn, NULL, "ibm,ioda2-phb") pnv_php_unregister(dn); + + for_each_compatible_node(dn, NULL, "ibm,ioda3-phb") + pnv_php_unregister(dn); } module_init(pnv_php_init); diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c index bcd5d357ca238f..c3899ee1db9950 100644 --- a/drivers/pci/hotplug/rpaphp_core.c +++ b/drivers/pci/hotplug/rpaphp_core.c @@ -230,7 +230,7 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name, struct of_drc_info drc; const __be32 *value; char cell_drc_name[MAX_DRC_NAME_LEN]; - int j, fndit; + int j; info = of_find_property(dn->parent, "ibm,drc-info", NULL); if (info == NULL) @@ -245,17 +245,13 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name, /* Should now know end of current entry */ - if (my_index > drc.last_drc_index) - continue; - - fndit = 1; - break; + /* Found it */ + if (my_index <= drc.last_drc_index) { + sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix, + my_index); + break; + } } - /* Found it */ - - if (fndit) - sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix, - my_index); if (((drc_name == NULL) || (drc_name && !strcmp(drc_name, cell_drc_name))) && diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c index f14f1f053a75b8..88a047b9fa6fa4 100644 --- a/drivers/phy/ti/phy-am654-serdes.c +++ b/drivers/phy/ti/phy-am654-serdes.c @@ -335,6 +335,7 @@ static int serdes_am654_clk_mux_set_parent(struct clk_hw *hw, u8 index) { struct serdes_am654_clk_mux *mux = to_serdes_am654_clk_mux(hw); struct regmap 
*regmap = mux->regmap; + const char *name = clk_hw_get_name(hw); unsigned int reg = mux->reg; int clk_id = mux->clk_id; int parents[SERDES_NUM_CLOCKS]; @@ -374,8 +375,7 @@ static int serdes_am654_clk_mux_set_parent(struct clk_hw *hw, u8 index) * This can never happen, unless we missed * a valid combination in serdes_am654_mux_table. */ - WARN(1, "Failed to find the parent of %s clock\n", - hw->init->name); + WARN(1, "Failed to find the parent of %s clock\n", name); return -EINVAL; } diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c index c0e75c37360590..d50ee023b559ae 100644 --- a/drivers/rtc/rtc-sun6i.c +++ b/drivers/rtc/rtc-sun6i.c @@ -279,7 +279,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node, of_property_read_string_index(node, "clock-output-names", 1, &clkout_name); - rtc->ext_losc = clk_register_gate(NULL, clkout_name, rtc->hw.init->name, + rtc->ext_losc = clk_register_gate(NULL, clkout_name, init.name, 0, rtc->base + SUN6I_LOSC_OUT_GATING, SUN6I_LOSC_OUT_GATING_EN_OFFSET, 0, &rtc->lock); diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 703948c9fbe107..02206162eaa9ea 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -438,11 +438,20 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev) pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); /* - * Try to reset the device. The success of this is dependent on - * being able to lock the device, which is not always possible. + * Try to get the locks ourselves to prevent a deadlock. The + * success of this is dependent on being able to lock the device, + * which is not always possible. + * We can not use the "try" reset interface here, which will + * overwrite the previously restored configuration information. 
*/ - if (vdev->reset_works && !pci_try_reset_function(pdev)) - vdev->needs_reset = false; + if (vdev->reset_works && pci_cfg_access_trylock(pdev)) { + if (device_trylock(&pdev->dev)) { + if (!__pci_reset_function_locked(pdev)) + vdev->needs_reset = false; + device_unlock(&pdev->dev); + } + pci_cfg_access_unlock(pdev); + } pci_restore_state(pdev); out: diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 8ce9ad21129f08..3b18fa4d090a30 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -435,7 +435,7 @@ static int tce_iommu_clear(struct tce_container *container, unsigned long oldhpa; long ret; enum dma_data_direction direction; - unsigned long lastentry = entry + pages; + unsigned long lastentry = entry + pages, firstentry = entry; for ( ; entry < lastentry; ++entry) { if (tbl->it_indirect_levels && tbl->it_userspace) { @@ -460,7 +460,7 @@ static int tce_iommu_clear(struct tce_container *container, direction = DMA_NONE; oldhpa = 0; - ret = iommu_tce_xchg(container->mm, tbl, entry, &oldhpa, + ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry, &oldhpa, &direction); if (ret) continue; @@ -476,6 +476,8 @@ static int tce_iommu_clear(struct tce_container *container, tce_iommu_unuse_page(container, oldhpa); } + iommu_tce_kill(tbl, firstentry, pages); + return 0; } @@ -518,8 +520,8 @@ static long tce_iommu_build(struct tce_container *container, hpa |= offset; dirtmp = direction; - ret = iommu_tce_xchg(container->mm, tbl, entry + i, &hpa, - &dirtmp); + ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i, + &hpa, &dirtmp); if (ret) { tce_iommu_unuse_page(container, hpa); pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n", @@ -536,6 +538,8 @@ static long tce_iommu_build(struct tce_container *container, if (ret) tce_iommu_clear(container, tbl, entry, i); + else + iommu_tce_kill(tbl, entry, pages); return ret; } @@ -572,8 +576,8 @@ static long tce_iommu_build_v2(struct tce_container *container, if (mm_iommu_mapped_inc(mem)) break; - ret = iommu_tce_xchg(container->mm, tbl, entry + i, &hpa, - &dirtmp); + ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i, + &hpa, &dirtmp); if (ret) { /* dirtmp cannot be DMA_NONE here */ tce_iommu_unuse_page_v2(container, tbl, entry + i); @@ -593,6 +597,8 @@ static long tce_iommu_build_v2(struct tce_container *container, if (ret) tce_iommu_clear(container, tbl, entry, i); + else + iommu_tce_kill(tbl, entry, pages); return ret; } @@ -1234,7 +1240,7 @@ static long tce_iommu_take_ownership_ddw(struct tce_container *container, static int tce_iommu_attach_group(void *iommu_data, struct iommu_group *iommu_group) { - int ret; + int ret = 0; struct tce_container *container = iommu_data; struct iommu_table_group *table_group; struct tce_iommu_group *tcegrp = NULL; @@ -1287,13 +1293,13 @@ static int tce_iommu_attach_group(void *iommu_data, !table_group->ops->release_ownership) { if (container->v2) { ret = -EPERM; - goto unlock_exit; + goto free_exit; } ret = tce_iommu_take_ownership(container, table_group); } else { if (!container->v2) { ret = -EPERM; - goto unlock_exit; + goto free_exit; } ret = tce_iommu_take_ownership_ddw(container, table_group); if (!tce_groups_attached(container) && !container->tables[0]) @@ -1305,10 +1311,11 @@ static int tce_iommu_attach_group(void *iommu_data, list_add(&tcegrp->next, &container->group_list); } -unlock_exit: +free_exit: if (ret && tcegrp) kfree(tcegrp); +unlock_exit: mutex_unlock(&container->lock); return ret; diff --git 
a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index ad830abe10212a..9a50b0558fa93d 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -62,6 +62,7 @@ MODULE_PARM_DESC(dma_entry_limit, struct vfio_iommu { struct list_head domain_list; + struct list_head iova_list; struct vfio_domain *external_domain; /* domain for external user */ struct mutex lock; struct rb_root dma_list; @@ -97,6 +98,12 @@ struct vfio_group { bool mdev_group; /* An mdev group */ }; +struct vfio_iova { + struct list_head list; + dma_addr_t start; + dma_addr_t end; +}; + /* * Guest RAM pinning working set or DMA target */ @@ -1038,6 +1045,27 @@ static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma, return ret; } +/* + * Check dma map request is within a valid iova range + */ +static bool vfio_iommu_iova_dma_valid(struct vfio_iommu *iommu, + dma_addr_t start, dma_addr_t end) +{ + struct list_head *iova = &iommu->iova_list; + struct vfio_iova *node; + + list_for_each_entry(node, iova, list) { + if (start >= node->start && end <= node->end) + return true; + } + + /* + * Check for list_empty() as well since a container with + * a single mdev device will have an empty list. + */ + return list_empty(iova); +} + static int vfio_dma_do_map(struct vfio_iommu *iommu, struct vfio_iommu_type1_dma_map *map) { @@ -1081,6 +1109,11 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, goto out_unlock; } + if (!vfio_iommu_iova_dma_valid(iommu, iova, iova + size - 1)) { + ret = -EINVAL; + goto out_unlock; + } + dma = kzalloc(sizeof(*dma), GFP_KERNEL); if (!dma) { ret = -ENOMEM; @@ -1270,15 +1303,13 @@ static struct vfio_group *find_iommu_group(struct vfio_domain *domain, return NULL; } -static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base) +static bool vfio_iommu_has_sw_msi(struct list_head *group_resv_regions, + phys_addr_t *base) { - struct list_head group_resv_regions; - struct iommu_resv_region *region, *next; + struct iommu_resv_region *region; bool ret = false; - INIT_LIST_HEAD(&group_resv_regions); - iommu_get_group_resv_regions(group, &group_resv_regions); - list_for_each_entry(region, &group_resv_regions, list) { + list_for_each_entry(region, group_resv_regions, list) { /* * The presence of any 'real' MSI regions should take * precedence over the software-managed one if the @@ -1294,8 +1325,7 @@ static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base) ret = true; } } - list_for_each_entry_safe(region, next, &group_resv_regions, list) - kfree(region); + return ret; } @@ -1395,6 +1425,228 @@ static int vfio_mdev_iommu_device(struct device *dev, void *data) return 0; } +/* + * This is a helper function to insert an address range to iova list. + * The list is initially created with a single entry corresponding to + * the IOMMU domain geometry to which the device group is attached. + * The list aperture gets modified when a new domain is added to the + * container if the new aperture doesn't conflict with the current one + * or with any existing dma mappings. The list is also modified to + * exclude any reserved regions associated with the device group. 
+ */ +static int vfio_iommu_iova_insert(struct list_head *head, + dma_addr_t start, dma_addr_t end) +{ + struct vfio_iova *region; + + region = kmalloc(sizeof(*region), GFP_KERNEL); + if (!region) + return -ENOMEM; + + INIT_LIST_HEAD(&region->list); + region->start = start; + region->end = end; + + list_add_tail(&region->list, head); + return 0; +} + +/* + * Check whether the new iommu aperture conflicts with the existing aperture + * or with any existing dma mappings. + */ +static bool vfio_iommu_aper_conflict(struct vfio_iommu *iommu, + dma_addr_t start, dma_addr_t end) +{ + struct vfio_iova *first, *last; + struct list_head *iova = &iommu->iova_list; + + if (list_empty(iova)) + return false; + + /* Disjoint sets, return conflict */ + first = list_first_entry(iova, struct vfio_iova, list); + last = list_last_entry(iova, struct vfio_iova, list); + if (start > last->end || end < first->start) + return true; + + /* Check for any existing dma mappings below the new start */ + if (start > first->start) { + if (vfio_find_dma(iommu, first->start, start - first->start)) + return true; + } + + /* Check for any existing dma mappings beyond the new end */ + if (end < last->end) { + if (vfio_find_dma(iommu, end + 1, last->end - end)) + return true; + } + + return false; +} + +/* + * Resize iommu iova aperture window. This is called only if the new + * aperture has no conflict with existing aperture and dma mappings. + */ +static int vfio_iommu_aper_resize(struct list_head *iova, + dma_addr_t start, dma_addr_t end) +{ + struct vfio_iova *node, *next; + + if (list_empty(iova)) + return vfio_iommu_iova_insert(iova, start, end); + + /* Adjust iova list start */ + list_for_each_entry_safe(node, next, iova, list) { + if (start < node->start) + break; + if (start >= node->start && start < node->end) { + node->start = start; + break; + } + /* Delete nodes before new start */ + list_del(&node->list); + kfree(node); + } + + /* Adjust iova list end */ + list_for_each_entry_safe(node, next, iova, list) { + if (end > node->end) + continue; + if (end > node->start && end <= node->end) { + node->end = end; + continue; + } + /* Delete nodes after new end */ + list_del(&node->list); + kfree(node); + } + + return 0; +} + +/* + * Check whether reserved regions conflict with existing dma mappings + */ +static bool vfio_iommu_resv_conflict(struct vfio_iommu *iommu, + struct list_head *resv_regions) +{ + struct iommu_resv_region *region; + + /* Check for conflict with existing dma mappings */ + list_for_each_entry(region, resv_regions, list) { + if (region->type == IOMMU_RESV_DIRECT_RELAXABLE) + continue; + + if (vfio_find_dma(iommu, region->start, region->length)) + return true; + } + + return false; +} + +/* + * Check iova region overlap with reserved regions and + * exclude them from the iommu iova range + */ +static int vfio_iommu_resv_exclude(struct list_head *iova, + struct list_head *resv_regions) +{ + struct iommu_resv_region *resv; + struct vfio_iova *n, *next; + + list_for_each_entry(resv, resv_regions, list) { + phys_addr_t start, end; + + if (resv->type == IOMMU_RESV_DIRECT_RELAXABLE) + continue; + + start = resv->start; + end = resv->start + resv->length - 1; + + list_for_each_entry_safe(n, next, iova, list) { + int ret = 0; + + /* No overlap */ + if (start > n->end || end < n->start) + continue; + /* + * Insert a new node if current node overlaps with the + * reserved region to exclude that from valid iova range.
+ * Note that, new node is inserted before the current + * node and finally the current node is deleted keeping + * the list updated and sorted. + */ + if (start > n->start) + ret = vfio_iommu_iova_insert(&n->list, n->start, + start - 1); + if (!ret && end < n->end) + ret = vfio_iommu_iova_insert(&n->list, end + 1, + n->end); + if (ret) + return ret; + + list_del(&n->list); + kfree(n); + } + } + + if (list_empty(iova)) + return -EINVAL; + + return 0; +} + +static void vfio_iommu_resv_free(struct list_head *resv_regions) +{ + struct iommu_resv_region *n, *next; + + list_for_each_entry_safe(n, next, resv_regions, list) { + list_del(&n->list); + kfree(n); + } +} + +static void vfio_iommu_iova_free(struct list_head *iova) +{ + struct vfio_iova *n, *next; + + list_for_each_entry_safe(n, next, iova, list) { + list_del(&n->list); + kfree(n); + } +} + +static int vfio_iommu_iova_get_copy(struct vfio_iommu *iommu, + struct list_head *iova_copy) +{ + struct list_head *iova = &iommu->iova_list; + struct vfio_iova *n; + int ret; + + list_for_each_entry(n, iova, list) { + ret = vfio_iommu_iova_insert(iova_copy, n->start, n->end); + if (ret) + goto out_free; + } + + return 0; + +out_free: + vfio_iommu_iova_free(iova_copy); + return ret; +} + +static void vfio_iommu_iova_insert_copy(struct vfio_iommu *iommu, + struct list_head *iova_copy) +{ + struct list_head *iova = &iommu->iova_list; + + vfio_iommu_iova_free(iova); + + list_splice_tail(iova_copy, iova); +} static int vfio_iommu_type1_attach_group(void *iommu_data, struct iommu_group *iommu_group) { @@ -1405,6 +1657,9 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, int ret; bool resv_msi, msi_remap; phys_addr_t resv_msi_base; + struct iommu_domain_geometry geo; + LIST_HEAD(iova_copy); + LIST_HEAD(group_resv_regions); mutex_lock(&iommu->lock); @@ -1481,7 +1736,43 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, if (ret) goto out_domain; - resv_msi = vfio_iommu_has_sw_msi(iommu_group, &resv_msi_base); + /* Get aperture info */ + iommu_domain_get_attr(domain->domain, DOMAIN_ATTR_GEOMETRY, &geo); + + if (vfio_iommu_aper_conflict(iommu, geo.aperture_start, + geo.aperture_end)) { + ret = -EINVAL; + goto out_detach; + } + + ret = iommu_get_group_resv_regions(iommu_group, &group_resv_regions); + if (ret) + goto out_detach; + + if (vfio_iommu_resv_conflict(iommu, &group_resv_regions)) { + ret = -EINVAL; + goto out_detach; + } + + /* + * We don't want to work on the original iova list as the list + * gets modified and in case of failure we have to retain the + * original list. Get a copy here. 
+ */ + ret = vfio_iommu_iova_get_copy(iommu, &iova_copy); + if (ret) + goto out_detach; + + ret = vfio_iommu_aper_resize(&iova_copy, geo.aperture_start, + geo.aperture_end); + if (ret) + goto out_detach; + + ret = vfio_iommu_resv_exclude(&iova_copy, &group_resv_regions); + if (ret) + goto out_detach; + + resv_msi = vfio_iommu_has_sw_msi(&group_resv_regions, &resv_msi_base); INIT_LIST_HEAD(&domain->group_list); list_add(&group->next, &domain->group_list); @@ -1514,8 +1805,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, list_add(&group->next, &d->group_list); iommu_domain_free(domain->domain); kfree(domain); - mutex_unlock(&iommu->lock); - return 0; + goto done; } ret = vfio_iommu_attach_group(domain, group); @@ -1538,8 +1828,11 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, } list_add(&domain->next, &iommu->domain_list); - +done: + /* Delete the old one and insert new iova list */ + vfio_iommu_iova_insert_copy(iommu, &iova_copy); mutex_unlock(&iommu->lock); + vfio_iommu_resv_free(&group_resv_regions); return 0; @@ -1547,6 +1840,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, vfio_iommu_detach_group(domain, group); out_domain: iommu_domain_free(domain->domain); + vfio_iommu_iova_free(&iova_copy); + vfio_iommu_resv_free(&group_resv_regions); out_free: kfree(domain); kfree(group); @@ -1602,12 +1897,93 @@ static void vfio_sanity_check_pfn_list(struct vfio_iommu *iommu) WARN_ON(iommu->notifier.head); } +/* + * Called when a domain is removed in detach. It is possible that + * the removed domain decided the iova aperture window. Modify the + * iova aperture with the smallest window among existing domains. + */ +static void vfio_iommu_aper_expand(struct vfio_iommu *iommu, + struct list_head *iova_copy) +{ + struct vfio_domain *domain; + struct iommu_domain_geometry geo; + struct vfio_iova *node; + dma_addr_t start = 0; + dma_addr_t end = (dma_addr_t)~0; + + if (list_empty(iova_copy)) + return; + + list_for_each_entry(domain, &iommu->domain_list, next) { + iommu_domain_get_attr(domain->domain, DOMAIN_ATTR_GEOMETRY, + &geo); + if (geo.aperture_start > start) + start = geo.aperture_start; + if (geo.aperture_end < end) + end = geo.aperture_end; + } + + /* Modify aperture limits. The new aper is either same or bigger */ + node = list_first_entry(iova_copy, struct vfio_iova, list); + node->start = start; + node = list_last_entry(iova_copy, struct vfio_iova, list); + node->end = end; +} + +/* + * Called when a group is detached. The reserved regions for that + * group can be part of valid iova now. But since reserved regions + * may be duplicated among groups, populate the iova valid regions + * list again. 
+ */ +static int vfio_iommu_resv_refresh(struct vfio_iommu *iommu, + struct list_head *iova_copy) +{ + struct vfio_domain *d; + struct vfio_group *g; + struct vfio_iova *node; + dma_addr_t start, end; + LIST_HEAD(resv_regions); + int ret; + + if (list_empty(iova_copy)) + return -EINVAL; + + list_for_each_entry(d, &iommu->domain_list, next) { + list_for_each_entry(g, &d->group_list, next) { + ret = iommu_get_group_resv_regions(g->iommu_group, + &resv_regions); + if (ret) + goto done; + } + } + + node = list_first_entry(iova_copy, struct vfio_iova, list); + start = node->start; + node = list_last_entry(iova_copy, struct vfio_iova, list); + end = node->end; + + /* purge the iova list and create new one */ + vfio_iommu_iova_free(iova_copy); + + ret = vfio_iommu_aper_resize(iova_copy, start, end); + if (ret) + goto done; + + /* Exclude current reserved regions from iova ranges */ + ret = vfio_iommu_resv_exclude(iova_copy, &resv_regions); +done: + vfio_iommu_resv_free(&resv_regions); + return ret; +} + static void vfio_iommu_type1_detach_group(void *iommu_data, struct iommu_group *iommu_group) { struct vfio_iommu *iommu = iommu_data; struct vfio_domain *domain; struct vfio_group *group; + LIST_HEAD(iova_copy); mutex_lock(&iommu->lock); @@ -1630,6 +2006,13 @@ static void vfio_iommu_type1_detach_group(void *iommu_data, } } + /* + * Get a copy of iova list. This will be used to update + * and to replace the current one later. Please note that + * we will leave the original list as it is if update fails. + */ + vfio_iommu_iova_get_copy(iommu, &iova_copy); + list_for_each_entry(domain, &iommu->domain_list, next) { group = find_iommu_group(domain, iommu_group); if (!group) @@ -1655,10 +2038,16 @@ static void vfio_iommu_type1_detach_group(void *iommu_data, iommu_domain_free(domain->domain); list_del(&domain->next); kfree(domain); + vfio_iommu_aper_expand(iommu, &iova_copy); } break; } + if (!vfio_iommu_resv_refresh(iommu, &iova_copy)) + vfio_iommu_iova_insert_copy(iommu, &iova_copy); + else + vfio_iommu_iova_free(&iova_copy); + detach_group_done: mutex_unlock(&iommu->lock); } @@ -1686,6 +2075,7 @@ static void *vfio_iommu_type1_open(unsigned long arg) } INIT_LIST_HEAD(&iommu->domain_list); + INIT_LIST_HEAD(&iommu->iova_list); iommu->dma_list = RB_ROOT; iommu->dma_avail = dma_entry_limit; mutex_init(&iommu->lock); @@ -1729,6 +2119,9 @@ static void vfio_iommu_type1_release(void *iommu_data) list_del(&domain->next); kfree(domain); } + + vfio_iommu_iova_free(&iommu->iova_list); + kfree(iommu); } @@ -1749,6 +2142,73 @@ static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu) return ret; } +static int vfio_iommu_iova_add_cap(struct vfio_info_cap *caps, + struct vfio_iommu_type1_info_cap_iova_range *cap_iovas, + size_t size) +{ + struct vfio_info_cap_header *header; + struct vfio_iommu_type1_info_cap_iova_range *iova_cap; + + header = vfio_info_cap_add(caps, size, + VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE, 1); + if (IS_ERR(header)) + return PTR_ERR(header); + + iova_cap = container_of(header, + struct vfio_iommu_type1_info_cap_iova_range, + header); + iova_cap->nr_iovas = cap_iovas->nr_iovas; + memcpy(iova_cap->iova_ranges, cap_iovas->iova_ranges, + cap_iovas->nr_iovas * sizeof(*cap_iovas->iova_ranges)); + return 0; +} + +static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu, + struct vfio_info_cap *caps) +{ + struct vfio_iommu_type1_info_cap_iova_range *cap_iovas; + struct vfio_iova *iova; + size_t size; + int iovas = 0, i = 0, ret; + + mutex_lock(&iommu->lock); + + list_for_each_entry(iova, 
&iommu->iova_list, list) + iovas++; + + if (!iovas) { + /* + * Return 0 as a container with a single mdev device + * will have an empty list + */ + ret = 0; + goto out_unlock; + } + + size = sizeof(*cap_iovas) + (iovas * sizeof(*cap_iovas->iova_ranges)); + + cap_iovas = kzalloc(size, GFP_KERNEL); + if (!cap_iovas) { + ret = -ENOMEM; + goto out_unlock; + } + + cap_iovas->nr_iovas = iovas; + + list_for_each_entry(iova, &iommu->iova_list, list) { + cap_iovas->iova_ranges[i].start = iova->start; + cap_iovas->iova_ranges[i].end = iova->end; + i++; + } + + ret = vfio_iommu_iova_add_cap(caps, cap_iovas, size); + + kfree(cap_iovas); +out_unlock: + mutex_unlock(&iommu->lock); + return ret; +} + static long vfio_iommu_type1_ioctl(void *iommu_data, unsigned int cmd, unsigned long arg) { @@ -1770,19 +2230,53 @@ static long vfio_iommu_type1_ioctl(void *iommu_data, } } else if (cmd == VFIO_IOMMU_GET_INFO) { struct vfio_iommu_type1_info info; + struct vfio_info_cap caps = { .buf = NULL, .size = 0 }; + unsigned long capsz; + int ret; minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes); + /* For backward compatibility, cannot require this */ + capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset); + if (copy_from_user(&info, (void __user *)arg, minsz)) return -EFAULT; if (info.argsz < minsz) return -EINVAL; + if (info.argsz >= capsz) { + minsz = capsz; + info.cap_offset = 0; /* output, no-recopy necessary */ + } + info.flags = VFIO_IOMMU_INFO_PGSIZES; info.iova_pgsizes = vfio_pgsize_bitmap(iommu); + ret = vfio_iommu_iova_build_caps(iommu, &caps); + if (ret) + return ret; + + if (caps.size) { + info.flags |= VFIO_IOMMU_INFO_CAPS; + + if (info.argsz < sizeof(info) + caps.size) { + info.argsz = sizeof(info) + caps.size; + } else { + vfio_info_cap_shift(&caps, sizeof(info)); + if (copy_to_user((void __user *)arg + + sizeof(info), caps.buf, + caps.size)) { + kfree(caps.buf); + return -EFAULT; + } + info.cap_offset = sizeof(info); + } + + kfree(caps.buf); + } + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 7bcc92add72c1f..7b13988796e173 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -104,9 +104,9 @@ static int pfn_is_ram(unsigned long pfn) } /* Reads a page from the oldmem device from given offset. 
*/ -static ssize_t read_from_oldmem(char *buf, size_t count, - u64 *ppos, int userbuf, - bool encrypted) +ssize_t read_from_oldmem(char *buf, size_t count, + u64 *ppos, int userbuf, + bool encrypted) { unsigned long pfn, offset; size_t nr_bytes; @@ -170,7 +170,7 @@ void __weak elfcorehdr_free(unsigned long long addr) */ ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos) { - return read_from_oldmem(buf, count, ppos, 0, sev_active()); + return read_from_oldmem(buf, count, ppos, 0, false); } /* diff --git a/include/dt-bindings/bus/ti-sysc.h b/include/dt-bindings/bus/ti-sysc.h index 7138384e2ef9f4..babd08a1d2267b 100644 --- a/include/dt-bindings/bus/ti-sysc.h +++ b/include/dt-bindings/bus/ti-sysc.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* TI sysc interconnect target module defines */ /* Generic sysc found on omap2 and later, also known as type1 */ diff --git a/include/dt-bindings/clock/ast2600-clock.h b/include/dt-bindings/clock/ast2600-clock.h new file mode 100644 index 00000000000000..38074a5f729692 --- /dev/null +++ b/include/dt-bindings/clock/ast2600-clock.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later OR MIT */ +#ifndef DT_BINDINGS_AST2600_CLOCK_H +#define DT_BINDINGS_AST2600_CLOCK_H + +#define ASPEED_CLK_GATE_ECLK 0 +#define ASPEED_CLK_GATE_GCLK 1 + +#define ASPEED_CLK_GATE_MCLK 2 + +#define ASPEED_CLK_GATE_VCLK 3 +#define ASPEED_CLK_GATE_BCLK 4 +#define ASPEED_CLK_GATE_DCLK 5 + +#define ASPEED_CLK_GATE_LCLK 6 +#define ASPEED_CLK_GATE_LHCCLK 7 + +#define ASPEED_CLK_GATE_D1CLK 8 +#define ASPEED_CLK_GATE_YCLK 9 + +#define ASPEED_CLK_GATE_REF0CLK 10 +#define ASPEED_CLK_GATE_REF1CLK 11 + +#define ASPEED_CLK_GATE_ESPICLK 12 + +#define ASPEED_CLK_GATE_USBUHCICLK 13 +#define ASPEED_CLK_GATE_USBPORT1CLK 14 +#define ASPEED_CLK_GATE_USBPORT2CLK 15 + +#define ASPEED_CLK_GATE_RSACLK 16 +#define ASPEED_CLK_GATE_RVASCLK 17 + +#define ASPEED_CLK_GATE_MAC1CLK 18 +#define ASPEED_CLK_GATE_MAC2CLK 19 +#define ASPEED_CLK_GATE_MAC3CLK 20 +#define ASPEED_CLK_GATE_MAC4CLK 21 + +#define ASPEED_CLK_GATE_UART1CLK 22 +#define ASPEED_CLK_GATE_UART2CLK 23 +#define ASPEED_CLK_GATE_UART3CLK 24 +#define ASPEED_CLK_GATE_UART4CLK 25 +#define ASPEED_CLK_GATE_UART5CLK 26 +#define ASPEED_CLK_GATE_UART6CLK 27 +#define ASPEED_CLK_GATE_UART7CLK 28 +#define ASPEED_CLK_GATE_UART8CLK 29 +#define ASPEED_CLK_GATE_UART9CLK 30 +#define ASPEED_CLK_GATE_UART10CLK 31 +#define ASPEED_CLK_GATE_UART11CLK 32 +#define ASPEED_CLK_GATE_UART12CLK 33 +#define ASPEED_CLK_GATE_UART13CLK 34 + +#define ASPEED_CLK_GATE_SDCLK 35 +#define ASPEED_CLK_GATE_EMMCCLK 36 + +#define ASPEED_CLK_GATE_I3C0CLK 37 +#define ASPEED_CLK_GATE_I3C1CLK 38 +#define ASPEED_CLK_GATE_I3C2CLK 39 +#define ASPEED_CLK_GATE_I3C3CLK 40 +#define ASPEED_CLK_GATE_I3C4CLK 41 +#define ASPEED_CLK_GATE_I3C5CLK 42 +#define ASPEED_CLK_GATE_I3C6CLK 43 +#define ASPEED_CLK_GATE_I3C7CLK 44 + +#define ASPEED_CLK_GATE_FSICLK 45 + +#define ASPEED_CLK_HPLL 46 +#define ASPEED_CLK_MPLL 47 +#define ASPEED_CLK_DPLL 48 +#define ASPEED_CLK_EPLL 49 +#define ASPEED_CLK_APLL 50 +#define ASPEED_CLK_AHB 51 +#define ASPEED_CLK_APB1 52 +#define ASPEED_CLK_APB2 53 +#define ASPEED_CLK_BCLK 54 +#define ASPEED_CLK_D1CLK 55 +#define ASPEED_CLK_VCLK 56 +#define ASPEED_CLK_LHCLK 57 +#define ASPEED_CLK_UART 58 +#define ASPEED_CLK_UARTX 59 +#define ASPEED_CLK_SDIO 60 +#define ASPEED_CLK_EMMC 61 +#define ASPEED_CLK_ECLK 62 +#define ASPEED_CLK_ECLK_MUX 63 +#define ASPEED_CLK_MAC12 64 +#define ASPEED_CLK_MAC34 65 +#define ASPEED_CLK_USBPHY_40M 66 + +/* Only list resets 
here that are not part of a gate */ +#define ASPEED_RESET_ADC 55 +#define ASPEED_RESET_JTAG_MASTER2 54 +#define ASPEED_RESET_I3C_DMA 39 +#define ASPEED_RESET_PWM 37 +#define ASPEED_RESET_PECI 36 +#define ASPEED_RESET_MII 35 +#define ASPEED_RESET_I2C 34 +#define ASPEED_RESET_H2X 31 +#define ASPEED_RESET_GP_MCU 30 +#define ASPEED_RESET_DP_MCU 29 +#define ASPEED_RESET_DP 28 +#define ASPEED_RESET_RC_XDMA 27 +#define ASPEED_RESET_GRAPHICS 26 +#define ASPEED_RESET_DEV_XDMA 25 +#define ASPEED_RESET_DEV_MCTP 24 +#define ASPEED_RESET_RC_MCTP 23 +#define ASPEED_RESET_JTAG_MASTER 22 +#define ASPEED_RESET_PCIE_DEV_O 21 +#define ASPEED_RESET_PCIE_DEV_OEN 20 +#define ASPEED_RESET_PCIE_RC_O 19 +#define ASPEED_RESET_PCIE_RC_OEN 18 +#define ASPEED_RESET_PCI_DP 5 +#define ASPEED_RESET_AHB 1 +#define ASPEED_RESET_SDRAM 0 + +#endif diff --git a/include/dt-bindings/clock/bcm2835.h b/include/dt-bindings/clock/bcm2835.h index 2cec01f968979b..b60c03430cf157 100644 --- a/include/dt-bindings/clock/bcm2835.h +++ b/include/dt-bindings/clock/bcm2835.h @@ -58,3 +58,5 @@ #define BCM2835_CLOCK_DSI1E 48 #define BCM2835_CLOCK_DSI0P 49 #define BCM2835_CLOCK_DSI1P 50 + +#define BCM2711_CLOCK_EMMC2 51 diff --git a/include/dt-bindings/clock/imx8mn-clock.h b/include/dt-bindings/clock/imx8mn-clock.h index 5255b1c2420e2c..d7b201652f4cd1 100644 --- a/include/dt-bindings/clock/imx8mn-clock.h +++ b/include/dt-bindings/clock/imx8mn-clock.h @@ -209,7 +209,8 @@ #define IMX8MN_CLK_ARM 191 #define IMX8MN_CLK_NAND_USDHC_BUS_RAWNAND_CLK 192 #define IMX8MN_CLK_GPU_CORE_ROOT 193 +#define IMX8MN_CLK_GIC 194 -#define IMX8MN_CLK_END 194 +#define IMX8MN_CLK_END 195 #endif diff --git a/include/dt-bindings/clock/mt6779-clk.h b/include/dt-bindings/clock/mt6779-clk.h new file mode 100644 index 00000000000000..b083139afbd2c8 --- /dev/null +++ b/include/dt-bindings/clock/mt6779-clk.h @@ -0,0 +1,436 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019 MediaTek Inc. 
+ * Author: Wendell Lin + */ + +#ifndef _DT_BINDINGS_CLK_MT6779_H +#define _DT_BINDINGS_CLK_MT6779_H + +/* TOPCKGEN */ +#define CLK_TOP_AXI 1 +#define CLK_TOP_MM 2 +#define CLK_TOP_CAM 3 +#define CLK_TOP_MFG 4 +#define CLK_TOP_CAMTG 5 +#define CLK_TOP_UART 6 +#define CLK_TOP_SPI 7 +#define CLK_TOP_MSDC50_0_HCLK 8 +#define CLK_TOP_MSDC50_0 9 +#define CLK_TOP_MSDC30_1 10 +#define CLK_TOP_MSDC30_2 11 +#define CLK_TOP_AUD 12 +#define CLK_TOP_AUD_INTBUS 13 +#define CLK_TOP_FPWRAP_ULPOSC 14 +#define CLK_TOP_SCP 15 +#define CLK_TOP_ATB 16 +#define CLK_TOP_SSPM 17 +#define CLK_TOP_DPI0 18 +#define CLK_TOP_SCAM 19 +#define CLK_TOP_AUD_1 20 +#define CLK_TOP_AUD_2 21 +#define CLK_TOP_DISP_PWM 22 +#define CLK_TOP_SSUSB_TOP_XHCI 23 +#define CLK_TOP_USB_TOP 24 +#define CLK_TOP_SPM 25 +#define CLK_TOP_I2C 26 +#define CLK_TOP_F52M_MFG 27 +#define CLK_TOP_SENINF 28 +#define CLK_TOP_DXCC 29 +#define CLK_TOP_CAMTG2 30 +#define CLK_TOP_AUD_ENG1 31 +#define CLK_TOP_AUD_ENG2 32 +#define CLK_TOP_FAES_UFSFDE 33 +#define CLK_TOP_FUFS 34 +#define CLK_TOP_IMG 35 +#define CLK_TOP_DSP 36 +#define CLK_TOP_DSP1 37 +#define CLK_TOP_DSP2 38 +#define CLK_TOP_IPU_IF 39 +#define CLK_TOP_CAMTG3 40 +#define CLK_TOP_CAMTG4 41 +#define CLK_TOP_PMICSPI 42 +#define CLK_TOP_MAINPLL_CK 43 +#define CLK_TOP_MAINPLL_D2 44 +#define CLK_TOP_MAINPLL_D3 45 +#define CLK_TOP_MAINPLL_D5 46 +#define CLK_TOP_MAINPLL_D7 47 +#define CLK_TOP_MAINPLL_D2_D2 48 +#define CLK_TOP_MAINPLL_D2_D4 49 +#define CLK_TOP_MAINPLL_D2_D8 50 +#define CLK_TOP_MAINPLL_D2_D16 51 +#define CLK_TOP_MAINPLL_D3_D2 52 +#define CLK_TOP_MAINPLL_D3_D4 53 +#define CLK_TOP_MAINPLL_D3_D8 54 +#define CLK_TOP_MAINPLL_D5_D2 55 +#define CLK_TOP_MAINPLL_D5_D4 56 +#define CLK_TOP_MAINPLL_D7_D2 57 +#define CLK_TOP_MAINPLL_D7_D4 58 +#define CLK_TOP_UNIVPLL_CK 59 +#define CLK_TOP_UNIVPLL_D2 60 +#define CLK_TOP_UNIVPLL_D3 61 +#define CLK_TOP_UNIVPLL_D5 62 +#define CLK_TOP_UNIVPLL_D7 63 +#define CLK_TOP_UNIVPLL_D2_D2 64 +#define CLK_TOP_UNIVPLL_D2_D4 65 +#define CLK_TOP_UNIVPLL_D2_D8 66 +#define CLK_TOP_UNIVPLL_D3_D2 67 +#define CLK_TOP_UNIVPLL_D3_D4 68 +#define CLK_TOP_UNIVPLL_D3_D8 69 +#define CLK_TOP_UNIVPLL_D5_D2 70 +#define CLK_TOP_UNIVPLL_D5_D4 71 +#define CLK_TOP_UNIVPLL_D5_D8 72 +#define CLK_TOP_APLL1_CK 73 +#define CLK_TOP_APLL1_D2 74 +#define CLK_TOP_APLL1_D4 75 +#define CLK_TOP_APLL1_D8 76 +#define CLK_TOP_APLL2_CK 77 +#define CLK_TOP_APLL2_D2 78 +#define CLK_TOP_APLL2_D4 79 +#define CLK_TOP_APLL2_D8 80 +#define CLK_TOP_TVDPLL_CK 81 +#define CLK_TOP_TVDPLL_D2 82 +#define CLK_TOP_TVDPLL_D4 83 +#define CLK_TOP_TVDPLL_D8 84 +#define CLK_TOP_TVDPLL_D16 85 +#define CLK_TOP_MSDCPLL_CK 86 +#define CLK_TOP_MSDCPLL_D2 87 +#define CLK_TOP_MSDCPLL_D4 88 +#define CLK_TOP_MSDCPLL_D8 89 +#define CLK_TOP_MSDCPLL_D16 90 +#define CLK_TOP_AD_OSC_CK 91 +#define CLK_TOP_OSC_D2 92 +#define CLK_TOP_OSC_D4 93 +#define CLK_TOP_OSC_D8 94 +#define CLK_TOP_OSC_D16 95 +#define CLK_TOP_F26M_CK_D2 96 +#define CLK_TOP_MFGPLL_CK 97 +#define CLK_TOP_UNIVP_192M_CK 98 +#define CLK_TOP_UNIVP_192M_D2 99 +#define CLK_TOP_UNIVP_192M_D4 100 +#define CLK_TOP_UNIVP_192M_D8 101 +#define CLK_TOP_UNIVP_192M_D16 102 +#define CLK_TOP_UNIVP_192M_D32 103 +#define CLK_TOP_MMPLL_CK 104 +#define CLK_TOP_MMPLL_D4 105 +#define CLK_TOP_MMPLL_D4_D2 106 +#define CLK_TOP_MMPLL_D4_D4 107 +#define CLK_TOP_MMPLL_D5 108 +#define CLK_TOP_MMPLL_D5_D2 109 +#define CLK_TOP_MMPLL_D5_D4 110 +#define CLK_TOP_MMPLL_D6 111 +#define CLK_TOP_MMPLL_D7 112 +#define CLK_TOP_CLK26M 113 +#define CLK_TOP_CLK13M 114 +#define CLK_TOP_ADSP 115 +#define 
CLK_TOP_DPMAIF 116 +#define CLK_TOP_VENC 117 +#define CLK_TOP_VDEC 118 +#define CLK_TOP_CAMTM 119 +#define CLK_TOP_PWM 120 +#define CLK_TOP_ADSPPLL_CK 121 +#define CLK_TOP_I2S0_M_SEL 122 +#define CLK_TOP_I2S1_M_SEL 123 +#define CLK_TOP_I2S2_M_SEL 124 +#define CLK_TOP_I2S3_M_SEL 125 +#define CLK_TOP_I2S4_M_SEL 126 +#define CLK_TOP_I2S5_M_SEL 127 +#define CLK_TOP_APLL12_DIV0 128 +#define CLK_TOP_APLL12_DIV1 129 +#define CLK_TOP_APLL12_DIV2 130 +#define CLK_TOP_APLL12_DIV3 131 +#define CLK_TOP_APLL12_DIV4 132 +#define CLK_TOP_APLL12_DIVB 133 +#define CLK_TOP_APLL12_DIV5 134 +#define CLK_TOP_IPE 135 +#define CLK_TOP_DPE 136 +#define CLK_TOP_CCU 137 +#define CLK_TOP_DSP3 138 +#define CLK_TOP_SENINF1 139 +#define CLK_TOP_SENINF2 140 +#define CLK_TOP_AUD_H 141 +#define CLK_TOP_CAMTG5 142 +#define CLK_TOP_TVDPLL_MAINPLL_D2_CK 143 +#define CLK_TOP_AD_OSC2_CK 144 +#define CLK_TOP_OSC2_D2 145 +#define CLK_TOP_OSC2_D3 146 +#define CLK_TOP_FMEM_466M_CK 147 +#define CLK_TOP_ADSPPLL_D4 148 +#define CLK_TOP_ADSPPLL_D5 149 +#define CLK_TOP_ADSPPLL_D6 150 +#define CLK_TOP_OSC_D10 151 +#define CLK_TOP_UNIVPLL_D3_D16 152 +#define CLK_TOP_NR_CLK 153 + +/* APMIXED */ +#define CLK_APMIXED_ARMPLL_LL 1 +#define CLK_APMIXED_ARMPLL_BL 2 +#define CLK_APMIXED_ARMPLL_BB 3 +#define CLK_APMIXED_CCIPLL 4 +#define CLK_APMIXED_MAINPLL 5 +#define CLK_APMIXED_UNIV2PLL 6 +#define CLK_APMIXED_MSDCPLL 7 +#define CLK_APMIXED_ADSPPLL 8 +#define CLK_APMIXED_MMPLL 9 +#define CLK_APMIXED_MFGPLL 10 +#define CLK_APMIXED_TVDPLL 11 +#define CLK_APMIXED_APLL1 12 +#define CLK_APMIXED_APLL2 13 +#define CLK_APMIXED_SSUSB26M 14 +#define CLK_APMIXED_APPLL26M 15 +#define CLK_APMIXED_MIPIC0_26M 16 +#define CLK_APMIXED_MDPLLGP26M 17 +#define CLK_APMIXED_MM_F26M 18 +#define CLK_APMIXED_UFS26M 19 +#define CLK_APMIXED_MIPIC1_26M 20 +#define CLK_APMIXED_MEMPLL26M 21 +#define CLK_APMIXED_CLKSQ_LVPLL_26M 22 +#define CLK_APMIXED_MIPID0_26M 23 +#define CLK_APMIXED_MIPID1_26M 24 +#define CLK_APMIXED_NR_CLK 25 + +/* CAMSYS */ +#define CLK_CAM_LARB10 1 +#define CLK_CAM_DFP_VAD 2 +#define CLK_CAM_LARB11 3 +#define CLK_CAM_LARB9 4 +#define CLK_CAM_CAM 5 +#define CLK_CAM_CAMTG 6 +#define CLK_CAM_SENINF 7 +#define CLK_CAM_CAMSV0 8 +#define CLK_CAM_CAMSV1 9 +#define CLK_CAM_CAMSV2 10 +#define CLK_CAM_CAMSV3 11 +#define CLK_CAM_CCU 12 +#define CLK_CAM_FAKE_ENG 13 +#define CLK_CAM_NR_CLK 14 + +/* INFRA */ +#define CLK_INFRA_PMIC_TMR 1 +#define CLK_INFRA_PMIC_AP 2 +#define CLK_INFRA_PMIC_MD 3 +#define CLK_INFRA_PMIC_CONN 4 +#define CLK_INFRA_SCPSYS 5 +#define CLK_INFRA_SEJ 6 +#define CLK_INFRA_APXGPT 7 +#define CLK_INFRA_ICUSB 8 +#define CLK_INFRA_GCE 9 +#define CLK_INFRA_THERM 10 +#define CLK_INFRA_I2C0 11 +#define CLK_INFRA_I2C1 12 +#define CLK_INFRA_I2C2 13 +#define CLK_INFRA_I2C3 14 +#define CLK_INFRA_PWM_HCLK 15 +#define CLK_INFRA_PWM1 16 +#define CLK_INFRA_PWM2 17 +#define CLK_INFRA_PWM3 18 +#define CLK_INFRA_PWM4 19 +#define CLK_INFRA_PWM 20 +#define CLK_INFRA_UART0 21 +#define CLK_INFRA_UART1 22 +#define CLK_INFRA_UART2 23 +#define CLK_INFRA_UART3 24 +#define CLK_INFRA_GCE_26M 25 +#define CLK_INFRA_CQ_DMA_FPC 26 +#define CLK_INFRA_BTIF 27 +#define CLK_INFRA_SPI0 28 +#define CLK_INFRA_MSDC0 29 +#define CLK_INFRA_MSDC1 30 +#define CLK_INFRA_MSDC2 31 +#define CLK_INFRA_MSDC0_SCK 32 +#define CLK_INFRA_DVFSRC 33 +#define CLK_INFRA_GCPU 34 +#define CLK_INFRA_TRNG 35 +#define CLK_INFRA_AUXADC 36 +#define CLK_INFRA_CPUM 37 +#define CLK_INFRA_CCIF1_AP 38 +#define CLK_INFRA_CCIF1_MD 39 +#define CLK_INFRA_AUXADC_MD 40 +#define CLK_INFRA_MSDC1_SCK 41 +#define 
CLK_INFRA_MSDC2_SCK 42 +#define CLK_INFRA_AP_DMA 43 +#define CLK_INFRA_XIU 44 +#define CLK_INFRA_DEVICE_APC 45 +#define CLK_INFRA_CCIF_AP 46 +#define CLK_INFRA_DEBUGSYS 47 +#define CLK_INFRA_AUD 48 +#define CLK_INFRA_CCIF_MD 49 +#define CLK_INFRA_DXCC_SEC_CORE 50 +#define CLK_INFRA_DXCC_AO 51 +#define CLK_INFRA_DRAMC_F26M 52 +#define CLK_INFRA_IRTX 53 +#define CLK_INFRA_DISP_PWM 54 +#define CLK_INFRA_DPMAIF_CK 55 +#define CLK_INFRA_AUD_26M_BCLK 56 +#define CLK_INFRA_SPI1 57 +#define CLK_INFRA_I2C4 58 +#define CLK_INFRA_MODEM_TEMP_SHARE 59 +#define CLK_INFRA_SPI2 60 +#define CLK_INFRA_SPI3 61 +#define CLK_INFRA_UNIPRO_SCK 62 +#define CLK_INFRA_UNIPRO_TICK 63 +#define CLK_INFRA_UFS_MP_SAP_BCLK 64 +#define CLK_INFRA_MD32_BCLK 65 +#define CLK_INFRA_SSPM 66 +#define CLK_INFRA_UNIPRO_MBIST 67 +#define CLK_INFRA_SSPM_BUS_HCLK 68 +#define CLK_INFRA_I2C5 69 +#define CLK_INFRA_I2C5_ARBITER 70 +#define CLK_INFRA_I2C5_IMM 71 +#define CLK_INFRA_I2C1_ARBITER 72 +#define CLK_INFRA_I2C1_IMM 73 +#define CLK_INFRA_I2C2_ARBITER 74 +#define CLK_INFRA_I2C2_IMM 75 +#define CLK_INFRA_SPI4 76 +#define CLK_INFRA_SPI5 77 +#define CLK_INFRA_CQ_DMA 78 +#define CLK_INFRA_UFS 79 +#define CLK_INFRA_AES_UFSFDE 80 +#define CLK_INFRA_UFS_TICK 81 +#define CLK_INFRA_MSDC0_SELF 82 +#define CLK_INFRA_MSDC1_SELF 83 +#define CLK_INFRA_MSDC2_SELF 84 +#define CLK_INFRA_SSPM_26M_SELF 85 +#define CLK_INFRA_SSPM_32K_SELF 86 +#define CLK_INFRA_UFS_AXI 87 +#define CLK_INFRA_I2C6 88 +#define CLK_INFRA_AP_MSDC0 89 +#define CLK_INFRA_MD_MSDC0 90 +#define CLK_INFRA_USB 91 +#define CLK_INFRA_DEVMPU_BCLK 92 +#define CLK_INFRA_CCIF2_AP 93 +#define CLK_INFRA_CCIF2_MD 94 +#define CLK_INFRA_CCIF3_AP 95 +#define CLK_INFRA_CCIF3_MD 96 +#define CLK_INFRA_SEJ_F13M 97 +#define CLK_INFRA_AES_BCLK 98 +#define CLK_INFRA_I2C7 99 +#define CLK_INFRA_I2C8 100 +#define CLK_INFRA_FBIST2FPC 101 +#define CLK_INFRA_CCIF4_AP 102 +#define CLK_INFRA_CCIF4_MD 103 +#define CLK_INFRA_FADSP 104 +#define CLK_INFRA_SSUSB_XHCI 105 +#define CLK_INFRA_SPI6 106 +#define CLK_INFRA_SPI7 107 +#define CLK_INFRA_NR_CLK 108 + +/* MFGCFG */ +#define CLK_MFGCFG_BG3D 1 +#define CLK_MFGCFG_NR_CLK 2 + +/* IMG */ +#define CLK_IMG_WPE_A 1 +#define CLK_IMG_MFB 2 +#define CLK_IMG_DIP 3 +#define CLK_IMG_LARB6 4 +#define CLK_IMG_LARB5 5 +#define CLK_IMG_NR_CLK 6 + +/* IPE */ +#define CLK_IPE_LARB7 1 +#define CLK_IPE_LARB8 2 +#define CLK_IPE_SMI_SUBCOM 3 +#define CLK_IPE_FD 4 +#define CLK_IPE_FE 5 +#define CLK_IPE_RSC 6 +#define CLK_IPE_DPE 7 +#define CLK_IPE_NR_CLK 8 + +/* MM_CONFIG */ +#define CLK_MM_SMI_COMMON 1 +#define CLK_MM_SMI_LARB0 2 +#define CLK_MM_SMI_LARB1 3 +#define CLK_MM_GALS_COMM0 4 +#define CLK_MM_GALS_COMM1 5 +#define CLK_MM_GALS_CCU2MM 6 +#define CLK_MM_GALS_IPU12MM 7 +#define CLK_MM_GALS_IMG2MM 8 +#define CLK_MM_GALS_CAM2MM 9 +#define CLK_MM_GALS_IPU2MM 10 +#define CLK_MM_MDP_DL_TXCK 11 +#define CLK_MM_IPU_DL_TXCK 12 +#define CLK_MM_MDP_RDMA0 13 +#define CLK_MM_MDP_RDMA1 14 +#define CLK_MM_MDP_RSZ0 15 +#define CLK_MM_MDP_RSZ1 16 +#define CLK_MM_MDP_TDSHP 17 +#define CLK_MM_MDP_WROT0 18 +#define CLK_MM_FAKE_ENG 19 +#define CLK_MM_DISP_OVL0 20 +#define CLK_MM_DISP_OVL0_2L 21 +#define CLK_MM_DISP_OVL1_2L 22 +#define CLK_MM_DISP_RDMA0 23 +#define CLK_MM_DISP_RDMA1 24 +#define CLK_MM_DISP_WDMA0 25 +#define CLK_MM_DISP_COLOR0 26 +#define CLK_MM_DISP_CCORR0 27 +#define CLK_MM_DISP_AAL0 28 +#define CLK_MM_DISP_GAMMA0 29 +#define CLK_MM_DISP_DITHER0 30 +#define CLK_MM_DISP_SPLIT 31 +#define CLK_MM_DSI0_MM_CK 32 +#define CLK_MM_DSI0_IF_CK 33 +#define CLK_MM_DPI_MM_CK 34 +#define 
CLK_MM_DPI_IF_CK 35 +#define CLK_MM_FAKE_ENG2 36 +#define CLK_MM_MDP_DL_RX_CK 37 +#define CLK_MM_IPU_DL_RX_CK 38 +#define CLK_MM_26M 39 +#define CLK_MM_MM_R2Y 40 +#define CLK_MM_DISP_RSZ 41 +#define CLK_MM_MDP_WDMA0 42 +#define CLK_MM_MDP_AAL 43 +#define CLK_MM_MDP_HDR 44 +#define CLK_MM_DBI_MM_CK 45 +#define CLK_MM_DBI_IF_CK 46 +#define CLK_MM_MDP_WROT1 47 +#define CLK_MM_DISP_POSTMASK0 48 +#define CLK_MM_DISP_HRT_BW 49 +#define CLK_MM_DISP_OVL_FBDC 50 +#define CLK_MM_NR_CLK 51 + +/* VDEC_GCON */ +#define CLK_VDEC_VDEC 1 +#define CLK_VDEC_LARB1 2 +#define CLK_VDEC_GCON_NR_CLK 3 + +/* VENC_GCON */ +#define CLK_VENC_GCON_LARB 1 +#define CLK_VENC_GCON_VENC 2 +#define CLK_VENC_GCON_JPGENC 3 +#define CLK_VENC_GCON_GALS 4 +#define CLK_VENC_GCON_NR_CLK 5 + +/* AUD */ +#define CLK_AUD_AFE 1 +#define CLK_AUD_22M 2 +#define CLK_AUD_24M 3 +#define CLK_AUD_APLL2_TUNER 4 +#define CLK_AUD_APLL_TUNER 5 +#define CLK_AUD_TDM 6 +#define CLK_AUD_ADC 7 +#define CLK_AUD_DAC 8 +#define CLK_AUD_DAC_PREDIS 9 +#define CLK_AUD_TML 10 +#define CLK_AUD_NLE 11 +#define CLK_AUD_I2S1_BCLK_SW 12 +#define CLK_AUD_I2S2_BCLK_SW 13 +#define CLK_AUD_I2S3_BCLK_SW 14 +#define CLK_AUD_I2S4_BCLK_SW 15 +#define CLK_AUD_I2S5_BCLK_SW 16 +#define CLK_AUD_CONN_I2S_ASRC 17 +#define CLK_AUD_GENERAL1_ASRC 18 +#define CLK_AUD_GENERAL2_ASRC 19 +#define CLK_AUD_DAC_HIRES 20 +#define CLK_AUD_PDN_ADDA6_ADC 21 +#define CLK_AUD_ADC_HIRES 22 +#define CLK_AUD_ADC_HIRES_TML 23 +#define CLK_AUD_ADDA6_ADC_HIRES 24 +#define CLK_AUD_3RD_DAC 25 +#define CLK_AUD_3RD_DAC_PREDIS 26 +#define CLK_AUD_3RD_DAC_TML 27 +#define CLK_AUD_3RD_DAC_HIRES 28 +#define CLK_AUD_NR_CLK 29 + +#endif /* _DT_BINDINGS_CLK_MT6779_H */ diff --git a/include/dt-bindings/clock/mt8183-clk.h b/include/dt-bindings/clock/mt8183-clk.h index 0046506eb24c3a..a7b470b0ec8ab4 100644 --- a/include/dt-bindings/clock/mt8183-clk.h +++ b/include/dt-bindings/clock/mt8183-clk.h @@ -284,6 +284,10 @@ #define CLK_INFRA_FBIST2FPC 100 #define CLK_INFRA_NR_CLK 101 +/* PERICFG */ +#define CLK_PERI_AXI 0 +#define CLK_PERI_NR_CLK 1 + /* MFGCFG */ #define CLK_MFG_BG3D 0 #define CLK_MFG_NR_CLK 1 diff --git a/include/dt-bindings/clock/omap5.h b/include/dt-bindings/clock/omap5.h index f3283957f48ddf..e5411938983cd2 100644 --- a/include/dt-bindings/clock/omap5.h +++ b/include/dt-bindings/clock/omap5.h @@ -89,6 +89,9 @@ /* dss clocks */ #define OMAP5_DSS_CORE_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) +/* gpu clocks */ +#define OMAP5_GPU_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) + /* l3init clocks */ #define OMAP5_MMC1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28) #define OMAP5_MMC2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30) diff --git a/include/dt-bindings/clock/qcom,gcc-qcs404.h b/include/dt-bindings/clock/qcom,gcc-qcs404.h index 2cd62c98561f5c..bc305154334706 100644 --- a/include/dt-bindings/clock/qcom,gcc-qcs404.h +++ b/include/dt-bindings/clock/qcom,gcc-qcs404.h @@ -146,6 +146,8 @@ #define GCC_MDP_TBU_CLK 138 #define GCC_QDSS_DAP_CLK 139 #define GCC_DCC_XO_CLK 140 +#define GCC_WCSS_Q6_AHB_CLK 141 +#define GCC_WCSS_Q6_AXIM_CLK 142 #define GCC_CDSP_CFG_AHB_CLK 143 #define GCC_BIMC_CDSP_CLK 144 #define GCC_CDSP_TBU_CLK 145 @@ -173,5 +175,6 @@ #define GCC_PCIE_0_CORE_STICKY_ARES 19 #define GCC_PCIE_0_SLEEP_ARES 20 #define GCC_PCIE_0_PIPE_ARES 21 +#define GCC_WDSP_RESTART 22 #endif diff --git a/include/dt-bindings/clock/qcom,gcc-sm8150.h b/include/dt-bindings/clock/qcom,gcc-sm8150.h new file mode 100644 index 00000000000000..90d60ef94c64a1 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-sm8150.h @@ -0,0 +1,243 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM8150_H +#define _DT_BINDINGS_CLK_QCOM_GCC_SM8150_H + +/* GCC clocks */ +#define GCC_AGGRE_NOC_PCIE_TBU_CLK 0 +#define GCC_AGGRE_UFS_CARD_AXI_CLK 1 +#define GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK 2 +#define GCC_AGGRE_UFS_PHY_AXI_CLK 3 +#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 4 +#define GCC_AGGRE_USB3_PRIM_AXI_CLK 5 +#define GCC_AGGRE_USB3_SEC_AXI_CLK 6 +#define GCC_BOOT_ROM_AHB_CLK 7 +#define GCC_CAMERA_AHB_CLK 8 +#define GCC_CAMERA_HF_AXI_CLK 9 +#define GCC_CAMERA_SF_AXI_CLK 10 +#define GCC_CAMERA_XO_CLK 11 +#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 12 +#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 13 +#define GCC_CPUSS_AHB_CLK 14 +#define GCC_CPUSS_AHB_CLK_SRC 15 +#define GCC_CPUSS_DVM_BUS_CLK 16 +#define GCC_CPUSS_GNOC_CLK 17 +#define GCC_CPUSS_RBCPR_CLK 18 +#define GCC_DDRSS_GPU_AXI_CLK 19 +#define GCC_DISP_AHB_CLK 20 +#define GCC_DISP_HF_AXI_CLK 21 +#define GCC_DISP_SF_AXI_CLK 22 +#define GCC_DISP_XO_CLK 23 +#define GCC_EMAC_AXI_CLK 24 +#define GCC_EMAC_PTP_CLK 25 +#define GCC_EMAC_PTP_CLK_SRC 26 +#define GCC_EMAC_RGMII_CLK 27 +#define GCC_EMAC_RGMII_CLK_SRC 28 +#define GCC_EMAC_SLV_AHB_CLK 29 +#define GCC_GP1_CLK 30 +#define GCC_GP1_CLK_SRC 31 +#define GCC_GP2_CLK 32 +#define GCC_GP2_CLK_SRC 33 +#define GCC_GP3_CLK 34 +#define GCC_GP3_CLK_SRC 35 +#define GCC_GPU_CFG_AHB_CLK 36 +#define GCC_GPU_GPLL0_CLK_SRC 37 +#define GCC_GPU_GPLL0_DIV_CLK_SRC 38 +#define GCC_GPU_IREF_CLK 39 +#define GCC_GPU_MEMNOC_GFX_CLK 40 +#define GCC_GPU_SNOC_DVM_GFX_CLK 41 +#define GCC_NPU_AT_CLK 42 +#define GCC_NPU_AXI_CLK 43 +#define GCC_NPU_CFG_AHB_CLK 44 +#define GCC_NPU_GPLL0_CLK_SRC 45 +#define GCC_NPU_GPLL0_DIV_CLK_SRC 46 +#define GCC_NPU_TRIG_CLK 47 +#define GCC_PCIE0_PHY_REFGEN_CLK 48 +#define GCC_PCIE1_PHY_REFGEN_CLK 49 +#define GCC_PCIE_0_AUX_CLK 50 +#define GCC_PCIE_0_AUX_CLK_SRC 51 +#define GCC_PCIE_0_CFG_AHB_CLK 52 +#define GCC_PCIE_0_CLKREF_CLK 53 +#define GCC_PCIE_0_MSTR_AXI_CLK 54 +#define GCC_PCIE_0_PIPE_CLK 55 +#define GCC_PCIE_0_SLV_AXI_CLK 56 +#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 57 +#define GCC_PCIE_1_AUX_CLK 58 +#define GCC_PCIE_1_AUX_CLK_SRC 59 +#define GCC_PCIE_1_CFG_AHB_CLK 60 +#define GCC_PCIE_1_CLKREF_CLK 61 +#define GCC_PCIE_1_MSTR_AXI_CLK 62 +#define GCC_PCIE_1_PIPE_CLK 63 +#define GCC_PCIE_1_SLV_AXI_CLK 64 +#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 65 +#define GCC_PCIE_PHY_AUX_CLK 66 +#define GCC_PCIE_PHY_REFGEN_CLK_SRC 67 +#define GCC_PDM2_CLK 68 +#define GCC_PDM2_CLK_SRC 69 +#define GCC_PDM_AHB_CLK 70 +#define GCC_PDM_XO4_CLK 71 +#define GCC_PRNG_AHB_CLK 72 +#define GCC_QMIP_CAMERA_NRT_AHB_CLK 73 +#define GCC_QMIP_CAMERA_RT_AHB_CLK 74 +#define GCC_QMIP_DISP_AHB_CLK 75 +#define GCC_QMIP_VIDEO_CVP_AHB_CLK 76 +#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 77 +#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 78 +#define GCC_QSPI_CORE_CLK 79 +#define GCC_QSPI_CORE_CLK_SRC 80 +#define GCC_QUPV3_WRAP0_S0_CLK 81 +#define GCC_QUPV3_WRAP0_S0_CLK_SRC 82 +#define GCC_QUPV3_WRAP0_S1_CLK 83 +#define GCC_QUPV3_WRAP0_S1_CLK_SRC 84 +#define GCC_QUPV3_WRAP0_S2_CLK 85 +#define GCC_QUPV3_WRAP0_S2_CLK_SRC 86 +#define GCC_QUPV3_WRAP0_S3_CLK 87 +#define GCC_QUPV3_WRAP0_S3_CLK_SRC 88 +#define GCC_QUPV3_WRAP0_S4_CLK 89 +#define GCC_QUPV3_WRAP0_S4_CLK_SRC 90 +#define GCC_QUPV3_WRAP0_S5_CLK 91 +#define GCC_QUPV3_WRAP0_S5_CLK_SRC 92 +#define GCC_QUPV3_WRAP0_S6_CLK 93 +#define GCC_QUPV3_WRAP0_S6_CLK_SRC 94 +#define GCC_QUPV3_WRAP0_S7_CLK 95 +#define GCC_QUPV3_WRAP0_S7_CLK_SRC 96 +#define 
GCC_QUPV3_WRAP1_S0_CLK 97 +#define GCC_QUPV3_WRAP1_S0_CLK_SRC 98 +#define GCC_QUPV3_WRAP1_S1_CLK 99 +#define GCC_QUPV3_WRAP1_S1_CLK_SRC 100 +#define GCC_QUPV3_WRAP1_S2_CLK 101 +#define GCC_QUPV3_WRAP1_S2_CLK_SRC 102 +#define GCC_QUPV3_WRAP1_S3_CLK 103 +#define GCC_QUPV3_WRAP1_S3_CLK_SRC 104 +#define GCC_QUPV3_WRAP1_S4_CLK 105 +#define GCC_QUPV3_WRAP1_S4_CLK_SRC 106 +#define GCC_QUPV3_WRAP1_S5_CLK 107 +#define GCC_QUPV3_WRAP1_S5_CLK_SRC 108 +#define GCC_QUPV3_WRAP2_S0_CLK 109 +#define GCC_QUPV3_WRAP2_S0_CLK_SRC 110 +#define GCC_QUPV3_WRAP2_S1_CLK 111 +#define GCC_QUPV3_WRAP2_S1_CLK_SRC 112 +#define GCC_QUPV3_WRAP2_S2_CLK 113 +#define GCC_QUPV3_WRAP2_S2_CLK_SRC 114 +#define GCC_QUPV3_WRAP2_S3_CLK 115 +#define GCC_QUPV3_WRAP2_S3_CLK_SRC 116 +#define GCC_QUPV3_WRAP2_S4_CLK 117 +#define GCC_QUPV3_WRAP2_S4_CLK_SRC 118 +#define GCC_QUPV3_WRAP2_S5_CLK 119 +#define GCC_QUPV3_WRAP2_S5_CLK_SRC 120 +#define GCC_QUPV3_WRAP_0_M_AHB_CLK 121 +#define GCC_QUPV3_WRAP_0_S_AHB_CLK 122 +#define GCC_QUPV3_WRAP_1_M_AHB_CLK 123 +#define GCC_QUPV3_WRAP_1_S_AHB_CLK 124 +#define GCC_QUPV3_WRAP_2_M_AHB_CLK 125 +#define GCC_QUPV3_WRAP_2_S_AHB_CLK 126 +#define GCC_SDCC2_AHB_CLK 127 +#define GCC_SDCC2_APPS_CLK 128 +#define GCC_SDCC2_APPS_CLK_SRC 129 +#define GCC_SDCC4_AHB_CLK 130 +#define GCC_SDCC4_APPS_CLK 131 +#define GCC_SDCC4_APPS_CLK_SRC 132 +#define GCC_SYS_NOC_CPUSS_AHB_CLK 133 +#define GCC_TSIF_AHB_CLK 134 +#define GCC_TSIF_INACTIVITY_TIMERS_CLK 135 +#define GCC_TSIF_REF_CLK 136 +#define GCC_TSIF_REF_CLK_SRC 137 +#define GCC_UFS_CARD_AHB_CLK 138 +#define GCC_UFS_CARD_AXI_CLK 139 +#define GCC_UFS_CARD_AXI_CLK_SRC 140 +#define GCC_UFS_CARD_AXI_HW_CTL_CLK 141 +#define GCC_UFS_CARD_CLKREF_CLK 142 +#define GCC_UFS_CARD_ICE_CORE_CLK 143 +#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 144 +#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK 145 +#define GCC_UFS_CARD_PHY_AUX_CLK 146 +#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 147 +#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK 148 +#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 149 +#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 150 +#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 151 +#define GCC_UFS_CARD_UNIPRO_CORE_CLK 152 +#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 153 +#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK 154 +#define GCC_UFS_MEM_CLKREF_CLK 155 +#define GCC_UFS_PHY_AHB_CLK 156 +#define GCC_UFS_PHY_AXI_CLK 157 +#define GCC_UFS_PHY_AXI_CLK_SRC 158 +#define GCC_UFS_PHY_AXI_HW_CTL_CLK 159 +#define GCC_UFS_PHY_ICE_CORE_CLK 160 +#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 161 +#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 162 +#define GCC_UFS_PHY_PHY_AUX_CLK 163 +#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 164 +#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 165 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 166 +#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 167 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 168 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK 169 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 170 +#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 171 +#define GCC_USB30_PRIM_MASTER_CLK 172 +#define GCC_USB30_PRIM_MASTER_CLK_SRC 173 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK 174 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 175 +#define GCC_USB30_PRIM_SLEEP_CLK 176 +#define GCC_USB30_SEC_MASTER_CLK 177 +#define GCC_USB30_SEC_MASTER_CLK_SRC 178 +#define GCC_USB30_SEC_MOCK_UTMI_CLK 179 +#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 180 +#define GCC_USB30_SEC_SLEEP_CLK 181 +#define GCC_USB3_PRIM_CLKREF_CLK 182 +#define GCC_USB3_PRIM_PHY_AUX_CLK 183 +#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 184 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 185 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 186 +#define GCC_USB3_SEC_CLKREF_CLK 187 
+#define GCC_USB3_SEC_PHY_AUX_CLK 188 +#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 189 +#define GCC_USB3_SEC_PHY_COM_AUX_CLK 190 +#define GCC_USB3_SEC_PHY_PIPE_CLK 191 +#define GCC_VIDEO_AHB_CLK 192 +#define GCC_VIDEO_AXI0_CLK 193 +#define GCC_VIDEO_AXI1_CLK 194 +#define GCC_VIDEO_AXIC_CLK 195 +#define GCC_VIDEO_XO_CLK 196 +#define GPLL0 197 +#define GPLL0_OUT_EVEN 198 +#define GPLL7 199 +#define GPLL9 200 + +/* Reset clocks */ +#define GCC_EMAC_BCR 0 +#define GCC_GPU_BCR 1 +#define GCC_MMSS_BCR 2 +#define GCC_NPU_BCR 3 +#define GCC_PCIE_0_BCR 4 +#define GCC_PCIE_0_PHY_BCR 5 +#define GCC_PCIE_1_BCR 6 +#define GCC_PCIE_1_PHY_BCR 7 +#define GCC_PCIE_PHY_BCR 8 +#define GCC_PDM_BCR 9 +#define GCC_PRNG_BCR 10 +#define GCC_QSPI_BCR 11 +#define GCC_QUPV3_WRAPPER_0_BCR 12 +#define GCC_QUPV3_WRAPPER_1_BCR 13 +#define GCC_QUPV3_WRAPPER_2_BCR 14 +#define GCC_QUSB2PHY_PRIM_BCR 15 +#define GCC_QUSB2PHY_SEC_BCR 16 +#define GCC_USB3_PHY_PRIM_BCR 17 +#define GCC_USB3_DP_PHY_PRIM_BCR 18 +#define GCC_USB3_PHY_SEC_BCR 19 +#define GCC_USB3PHY_PHY_SEC_BCR 20 +#define GCC_SDCC2_BCR 21 +#define GCC_SDCC4_BCR 22 +#define GCC_TSIF_BCR 23 +#define GCC_UFS_CARD_BCR 24 +#define GCC_UFS_PHY_BCR 25 +#define GCC_USB30_PRIM_BCR 26 +#define GCC_USB30_SEC_BCR 27 +#define GCC_USB_PHY_CFG_AHB2PHY_BCR 28 + +#endif diff --git a/include/dt-bindings/clock/rk3308-cru.h b/include/dt-bindings/clock/rk3308-cru.h new file mode 100644 index 00000000000000..d97840f9ee2e15 --- /dev/null +++ b/include/dt-bindings/clock/rk3308-cru.h @@ -0,0 +1,387 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019 Rockchip Electronics Co. Ltd. + * Author: Finley Xiao + */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3308_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3308_H + +/* core clocks */ +#define PLL_APLL 1 +#define PLL_DPLL 2 +#define PLL_VPLL0 3 +#define PLL_VPLL1 4 +#define ARMCLK 5 + +/* sclk (special clocks) */ +#define USB480M 14 +#define SCLK_RTC32K 15 +#define SCLK_PVTM_CORE 16 +#define SCLK_UART0 17 +#define SCLK_UART1 18 +#define SCLK_UART2 19 +#define SCLK_UART3 20 +#define SCLK_UART4 21 +#define SCLK_I2C0 22 +#define SCLK_I2C1 23 +#define SCLK_I2C2 24 +#define SCLK_I2C3 25 +#define SCLK_PWM0 26 +#define SCLK_SPI0 27 +#define SCLK_SPI1 28 +#define SCLK_SPI2 29 +#define SCLK_TIMER0 30 +#define SCLK_TIMER1 31 +#define SCLK_TIMER2 32 +#define SCLK_TIMER3 33 +#define SCLK_TIMER4 34 +#define SCLK_TIMER5 35 +#define SCLK_TSADC 36 +#define SCLK_SARADC 37 +#define SCLK_OTP 38 +#define SCLK_OTP_USR 39 +#define SCLK_CPU_BOOST 40 +#define SCLK_CRYPTO 41 +#define SCLK_CRYPTO_APK 42 +#define SCLK_NANDC_DIV 43 +#define SCLK_NANDC_DIV50 44 +#define SCLK_NANDC 45 +#define SCLK_SDMMC_DIV 46 +#define SCLK_SDMMC_DIV50 47 +#define SCLK_SDMMC 48 +#define SCLK_SDMMC_DRV 49 +#define SCLK_SDMMC_SAMPLE 50 +#define SCLK_SDIO_DIV 51 +#define SCLK_SDIO_DIV50 52 +#define SCLK_SDIO 53 +#define SCLK_SDIO_DRV 54 +#define SCLK_SDIO_SAMPLE 55 +#define SCLK_EMMC_DIV 56 +#define SCLK_EMMC_DIV50 57 +#define SCLK_EMMC 58 +#define SCLK_EMMC_DRV 59 +#define SCLK_EMMC_SAMPLE 60 +#define SCLK_SFC 61 +#define SCLK_OTG_ADP 62 +#define SCLK_MAC_SRC 63 +#define SCLK_MAC 64 +#define SCLK_MAC_REF 65 +#define SCLK_MAC_RX_TX 66 +#define SCLK_MAC_RMII 67 +#define SCLK_DDR_MON_TIMER 68 +#define SCLK_DDR_MON 69 +#define SCLK_DDRCLK 70 +#define SCLK_PMU 71 +#define SCLK_USBPHY_REF 72 +#define SCLK_WIFI 73 +#define SCLK_PVTM_PMU 74 +#define SCLK_PDM 75 +#define SCLK_I2S0_8CH_TX 76 +#define SCLK_I2S0_8CH_TX_OUT 77 +#define SCLK_I2S0_8CH_RX 78 +#define SCLK_I2S0_8CH_RX_OUT 79 +#define 
SCLK_I2S1_8CH_TX 80 +#define SCLK_I2S1_8CH_TX_OUT 81 +#define SCLK_I2S1_8CH_RX 82 +#define SCLK_I2S1_8CH_RX_OUT 83 +#define SCLK_I2S2_8CH_TX 84 +#define SCLK_I2S2_8CH_TX_OUT 85 +#define SCLK_I2S2_8CH_RX 86 +#define SCLK_I2S2_8CH_RX_OUT 87 +#define SCLK_I2S3_8CH_TX 88 +#define SCLK_I2S3_8CH_TX_OUT 89 +#define SCLK_I2S3_8CH_RX 90 +#define SCLK_I2S3_8CH_RX_OUT 91 +#define SCLK_I2S0_2CH 92 +#define SCLK_I2S0_2CH_OUT 93 +#define SCLK_I2S1_2CH 94 +#define SCLK_I2S1_2CH_OUT 95 +#define SCLK_SPDIF_TX_DIV 96 +#define SCLK_SPDIF_TX_DIV50 97 +#define SCLK_SPDIF_TX 98 +#define SCLK_SPDIF_RX_DIV 99 +#define SCLK_SPDIF_RX_DIV50 100 +#define SCLK_SPDIF_RX 101 +#define SCLK_I2S0_8CH_TX_MUX 102 +#define SCLK_I2S0_8CH_RX_MUX 103 +#define SCLK_I2S1_8CH_TX_MUX 104 +#define SCLK_I2S1_8CH_RX_MUX 105 +#define SCLK_I2S2_8CH_TX_MUX 106 +#define SCLK_I2S2_8CH_RX_MUX 107 +#define SCLK_I2S3_8CH_TX_MUX 108 +#define SCLK_I2S3_8CH_RX_MUX 109 +#define SCLK_I2S0_8CH_TX_SRC 110 +#define SCLK_I2S0_8CH_RX_SRC 111 +#define SCLK_I2S1_8CH_TX_SRC 112 +#define SCLK_I2S1_8CH_RX_SRC 113 +#define SCLK_I2S2_8CH_TX_SRC 114 +#define SCLK_I2S2_8CH_RX_SRC 115 +#define SCLK_I2S3_8CH_TX_SRC 116 +#define SCLK_I2S3_8CH_RX_SRC 117 +#define SCLK_I2S0_2CH_SRC 118 +#define SCLK_I2S1_2CH_SRC 119 +#define SCLK_PWM1 120 +#define SCLK_PWM2 121 +#define SCLK_OWIRE 122 + +/* dclk */ +#define DCLK_VOP 125 + +/* aclk */ +#define ACLK_BUS_SRC 130 +#define ACLK_BUS 131 +#define ACLK_PERI_SRC 132 +#define ACLK_PERI 133 +#define ACLK_MAC 134 +#define ACLK_CRYPTO 135 +#define ACLK_VOP 136 +#define ACLK_GIC 137 +#define ACLK_DMAC0 138 +#define ACLK_DMAC1 139 + +/* hclk */ +#define HCLK_BUS 150 +#define HCLK_PERI 151 +#define HCLK_AUDIO 152 +#define HCLK_NANDC 153 +#define HCLK_SDMMC 154 +#define HCLK_SDIO 155 +#define HCLK_EMMC 156 +#define HCLK_SFC 157 +#define HCLK_OTG 158 +#define HCLK_HOST 159 +#define HCLK_HOST_ARB 160 +#define HCLK_PDM 161 +#define HCLK_SPDIFTX 162 +#define HCLK_SPDIFRX 163 +#define HCLK_I2S0_8CH 164 +#define HCLK_I2S1_8CH 165 +#define HCLK_I2S2_8CH 166 +#define HCLK_I2S3_8CH 167 +#define HCLK_I2S0_2CH 168 +#define HCLK_I2S1_2CH 169 +#define HCLK_VAD 170 +#define HCLK_CRYPTO 171 +#define HCLK_VOP 172 + +/* pclk */ +#define PCLK_BUS 190 +#define PCLK_DDR 191 +#define PCLK_PERI 192 +#define PCLK_PMU 193 +#define PCLK_AUDIO 194 +#define PCLK_MAC 195 +#define PCLK_ACODEC 196 +#define PCLK_UART0 197 +#define PCLK_UART1 198 +#define PCLK_UART2 199 +#define PCLK_UART3 200 +#define PCLK_UART4 201 +#define PCLK_I2C0 202 +#define PCLK_I2C1 203 +#define PCLK_I2C2 204 +#define PCLK_I2C3 205 +#define PCLK_PWM0 206 +#define PCLK_SPI0 207 +#define PCLK_SPI1 208 +#define PCLK_SPI2 209 +#define PCLK_SARADC 210 +#define PCLK_TSADC 211 +#define PCLK_TIMER 212 +#define PCLK_OTP_NS 213 +#define PCLK_WDT 214 +#define PCLK_GPIO0 215 +#define PCLK_GPIO1 216 +#define PCLK_GPIO2 217 +#define PCLK_GPIO3 218 +#define PCLK_GPIO4 219 +#define PCLK_SGRF 220 +#define PCLK_GRF 221 +#define PCLK_USBSD_DET 222 +#define PCLK_DDR_UPCTL 223 +#define PCLK_DDR_MON 224 +#define PCLK_DDRPHY 225 +#define PCLK_DDR_STDBY 226 +#define PCLK_USB_GRF 227 +#define PCLK_CRU 228 +#define PCLK_OTP_PHY 229 +#define PCLK_CPU_BOOST 230 +#define PCLK_PWM1 231 +#define PCLK_PWM2 232 +#define PCLK_CAN 233 +#define PCLK_OWIRE 234 + +#define CLK_NR_CLKS (PCLK_OWIRE + 1) + +/* soft-reset indices */ + +/* cru_softrst_con0 */ +#define SRST_CORE0_PO 0 +#define SRST_CORE1_PO 1 +#define SRST_CORE2_PO 2 +#define SRST_CORE3_PO 3 +#define SRST_CORE0 4 +#define SRST_CORE1 5 +#define SRST_CORE2 6 +#define 
SRST_CORE3 7 +#define SRST_CORE0_DBG 8 +#define SRST_CORE1_DBG 9 +#define SRST_CORE2_DBG 10 +#define SRST_CORE3_DBG 11 +#define SRST_TOPDBG 12 +#define SRST_CORE_NOC 13 +#define SRST_STRC_A 14 +#define SRST_L2C 15 + +/* cru_softrst_con1 */ +#define SRST_DAP 16 +#define SRST_CORE_PVTM 17 +#define SRST_CORE_PRF 18 +#define SRST_CORE_GRF 19 +#define SRST_DDRUPCTL 20 +#define SRST_DDRUPCTL_P 22 +#define SRST_MSCH 23 +#define SRST_DDRMON_P 25 +#define SRST_DDRSTDBY_P 26 +#define SRST_DDRSTDBY 27 +#define SRST_DDRPHY 28 +#define SRST_DDRPHY_DIV 29 +#define SRST_DDRPHY_P 30 + +/* cru_softrst_con2 */ +#define SRST_BUS_NIU_H 32 +#define SRST_USB_NIU_P 33 +#define SRST_CRYPTO_A 34 +#define SRST_CRYPTO_H 35 +#define SRST_CRYPTO 36 +#define SRST_CRYPTO_APK 37 +#define SRST_VOP_A 38 +#define SRST_VOP_H 39 +#define SRST_VOP_D 40 +#define SRST_INTMEM_A 41 +#define SRST_ROM_H 42 +#define SRST_GIC_A 43 +#define SRST_UART0_P 44 +#define SRST_UART0 45 +#define SRST_UART1_P 46 +#define SRST_UART1 47 + +/* cru_softrst_con3 */ +#define SRST_UART2_P 48 +#define SRST_UART2 49 +#define SRST_UART3_P 50 +#define SRST_UART3 51 +#define SRST_UART4_P 52 +#define SRST_UART4 53 +#define SRST_I2C0_P 54 +#define SRST_I2C0 55 +#define SRST_I2C1_P 56 +#define SRST_I2C1 57 +#define SRST_I2C2_P 58 +#define SRST_I2C2 59 +#define SRST_I2C3_P 60 +#define SRST_I2C3 61 +#define SRST_PWM0_P 62 +#define SRST_PWM0 63 + +/* cru_softrst_con4 */ +#define SRST_SPI0_P 64 +#define SRST_SPI0 65 +#define SRST_SPI1_P 66 +#define SRST_SPI1 67 +#define SRST_SPI2_P 68 +#define SRST_SPI2 69 +#define SRST_SARADC_P 70 +#define SRST_TSADC_P 71 +#define SRST_TSADC 72 +#define SRST_TIMER0_P 73 +#define SRST_TIMER0 74 +#define SRST_TIMER1 75 +#define SRST_TIMER2 76 +#define SRST_TIMER3 77 +#define SRST_TIMER4 78 +#define SRST_TIMER5 79 + +/* cru_softrst_con5 */ +#define SRST_OTP_NS_P 80 +#define SRST_OTP_NS_SBPI 81 +#define SRST_OTP_NS_USR 82 +#define SRST_OTP_PHY_P 83 +#define SRST_OTP_PHY 84 +#define SRST_GPIO0_P 86 +#define SRST_GPIO1_P 87 +#define SRST_GPIO2_P 88 +#define SRST_GPIO3_P 89 +#define SRST_GPIO4_P 90 +#define SRST_GRF_P 91 +#define SRST_USBSD_DET_P 92 +#define SRST_PMU 93 +#define SRST_PMU_PVTM 94 +#define SRST_USB_GRF_P 95 + +/* cru_softrst_con6 */ +#define SRST_CPU_BOOST 96 +#define SRST_CPU_BOOST_P 97 +#define SRST_PWM1_P 98 +#define SRST_PWM1 99 +#define SRST_PWM2_P 100 +#define SRST_PWM2 101 +#define SRST_PERI_NIU_A 104 +#define SRST_PERI_NIU_H 105 +#define SRST_PERI_NIU_p 106 +#define SRST_USB2OTG_H 107 +#define SRST_USB2OTG 108 +#define SRST_USB2OTG_ADP 109 +#define SRST_USB2HOST_H 110 +#define SRST_USB2HOST_ARB_H 111 + +/* cru_softrst_con7 */ +#define SRST_USB2HOST_AUX_H 112 +#define SRST_USB2HOST_EHCI 113 +#define SRST_USB2HOST 114 +#define SRST_USBPHYPOR 115 +#define SRST_UTMI0 116 +#define SRST_UTMI1 117 +#define SRST_SDIO_H 118 +#define SRST_EMMC_H 119 +#define SRST_SFC_H 120 +#define SRST_SFC 121 +#define SRST_SD_H 122 +#define SRST_NANDC_H 123 +#define SRST_NANDC_N 124 +#define SRST_MAC_A 125 +#define SRST_CAN_P 126 +#define SRST_OWIRE_P 127 + +/* cru_softrst_con8 */ +#define SRST_AUDIO_NIU_H 128 +#define SRST_AUDIO_NIU_P 129 +#define SRST_PDM_H 130 +#define SRST_PDM_M 131 +#define SRST_SPDIFTX_H 132 +#define SRST_SPDIFTX_M 133 +#define SRST_SPDIFRX_H 134 +#define SRST_SPDIFRX_M 135 +#define SRST_I2S0_8CH_H 136 +#define SRST_I2S0_8CH_TX_M 137 +#define SRST_I2S0_8CH_RX_M 138 +#define SRST_I2S1_8CH_H 139 +#define SRST_I2S1_8CH_TX_M 140 +#define SRST_I2S1_8CH_RX_M 141 +#define SRST_I2S2_8CH_H 142 +#define SRST_I2S2_8CH_TX_M 
143 + +/* cru_softrst_con9 */ +#define SRST_I2S2_8CH_RX_M 144 +#define SRST_I2S3_8CH_H 145 +#define SRST_I2S3_8CH_TX_M 146 +#define SRST_I2S3_8CH_RX_M 147 +#define SRST_I2S0_2CH_H 148 +#define SRST_I2S0_2CH_M 149 +#define SRST_I2S1_2CH_H 150 +#define SRST_I2S1_2CH_M 151 +#define SRST_VAD_H 152 +#define SRST_ACODEC_P 153 + +#endif diff --git a/include/dt-bindings/clock/sun8i-v3s-ccu.h b/include/dt-bindings/clock/sun8i-v3s-ccu.h index c0d5d5599c87f5..014ac6123d1760 100644 --- a/include/dt-bindings/clock/sun8i-v3s-ccu.h +++ b/include/dt-bindings/clock/sun8i-v3s-ccu.h @@ -104,4 +104,8 @@ #define CLK_MIPI_CSI 73 +/* Clocks not available on V3s */ +#define CLK_BUS_I2S0 75 +#define CLK_I2S0 76 + #endif /* _DT_BINDINGS_CLK_SUN8I_V3S_H_ */ diff --git a/include/dt-bindings/pinctrl/k3.h b/include/dt-bindings/pinctrl/k3.h index 45e11b6170ca90..499de62165811d 100644 --- a/include/dt-bindings/pinctrl/k3.h +++ b/include/dt-bindings/pinctrl/k3.h @@ -32,4 +32,7 @@ #define AM65X_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode)) #define AM65X_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode)) +#define J721E_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode)) +#define J721E_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode)) + #endif diff --git a/include/dt-bindings/reset-controller/mt8183-resets.h b/include/dt-bindings/reset-controller/mt8183-resets.h new file mode 100644 index 00000000000000..8804e34ebdd409 --- /dev/null +++ b/include/dt-bindings/reset-controller/mt8183-resets.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019 MediaTek Inc. + * Author: Yong Liang + */ + +#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8183 +#define _DT_BINDINGS_RESET_CONTROLLER_MT8183 + +/* INFRACFG AO resets */ +#define MT8183_INFRACFG_AO_THERM_SW_RST 0 +#define MT8183_INFRACFG_AO_USB_TOP_SW_RST 1 +#define MT8183_INFRACFG_AO_MM_IOMMU_SW_RST 3 +#define MT8183_INFRACFG_AO_MSDC3_SW_RST 4 +#define MT8183_INFRACFG_AO_MSDC2_SW_RST 5 +#define MT8183_INFRACFG_AO_MSDC1_SW_RST 6 +#define MT8183_INFRACFG_AO_MSDC0_SW_RST 7 +#define MT8183_INFRACFG_AO_APDMA_SW_RST 9 +#define MT8183_INFRACFG_AO_MIMP_D_SW_RST 10 +#define MT8183_INFRACFG_AO_BTIF_SW_RST 12 +#define MT8183_INFRACFG_AO_DISP_PWM_SW_RST 14 +#define MT8183_INFRACFG_AO_AUXADC_SW_RST 15 + +#define MT8183_INFRACFG_AO_IRTX_SW_RST 32 +#define MT8183_INFRACFG_AO_SPI0_SW_RST 33 +#define MT8183_INFRACFG_AO_I2C0_SW_RST 34 +#define MT8183_INFRACFG_AO_I2C1_SW_RST 35 +#define MT8183_INFRACFG_AO_I2C2_SW_RST 36 +#define MT8183_INFRACFG_AO_I2C3_SW_RST 37 +#define MT8183_INFRACFG_AO_UART0_SW_RST 38 +#define MT8183_INFRACFG_AO_UART1_SW_RST 39 +#define MT8183_INFRACFG_AO_UART2_SW_RST 40 +#define MT8183_INFRACFG_AO_PWM_SW_RST 41 +#define MT8183_INFRACFG_AO_SPI1_SW_RST 42 +#define MT8183_INFRACFG_AO_I2C4_SW_RST 43 +#define MT8183_INFRACFG_AO_DVFSP_SW_RST 44 +#define MT8183_INFRACFG_AO_SPI2_SW_RST 45 +#define MT8183_INFRACFG_AO_SPI3_SW_RST 46 +#define MT8183_INFRACFG_AO_UFSHCI_SW_RST 47 + +#define MT8183_INFRACFG_AO_PMIC_WRAP_SW_RST 64 +#define MT8183_INFRACFG_AO_SPM_SW_RST 65 +#define MT8183_INFRACFG_AO_USBSIF_SW_RST 66 +#define MT8183_INFRACFG_AO_KP_SW_RST 68 +#define MT8183_INFRACFG_AO_APXGPT_SW_RST 69 +#define MT8183_INFRACFG_AO_CLDMA_AO_SW_RST 70 +#define MT8183_INFRACFG_AO_UNIPRO_UFS_SW_RST 71 +#define MT8183_INFRACFG_AO_DX_CC_SW_RST 72 +#define MT8183_INFRACFG_AO_UFSPHY_SW_RST 73 + +#define MT8183_INFRACFG_AO_DX_CC_SEC_SW_RST 96 +#define MT8183_INFRACFG_AO_GCE_SW_RST 97 +#define MT8183_INFRACFG_AO_CLDMA_SW_RST 98 
+#define MT8183_INFRACFG_AO_TRNG_SW_RST 99 +#define MT8183_INFRACFG_AO_AP_MD_CCIF_1_SW_RST 103 +#define MT8183_INFRACFG_AO_AP_MD_CCIF_SW_RST 104 +#define MT8183_INFRACFG_AO_I2C1_IMM_SW_RST 105 +#define MT8183_INFRACFG_AO_I2C1_ARB_SW_RST 106 +#define MT8183_INFRACFG_AO_I2C2_IMM_SW_RST 107 +#define MT8183_INFRACFG_AO_I2C2_ARB_SW_RST 108 +#define MT8183_INFRACFG_AO_I2C5_SW_RST 109 +#define MT8183_INFRACFG_AO_I2C5_IMM_SW_RST 110 +#define MT8183_INFRACFG_AO_I2C5_ARB_SW_RST 111 +#define MT8183_INFRACFG_AO_SPI4_SW_RST 112 +#define MT8183_INFRACFG_AO_SPI5_SW_RST 113 +#define MT8183_INFRACFG_AO_INFRA2MFGAXI_CBIP_CLAS_SW_RST 114 +#define MT8183_INFRACFG_AO_MFGAXI2INFRA_M0_CBIP_GLAS_OUT_SW_RST 115 +#define MT8183_INFRACFG_AO_MFGAXI2INFRA_M1_CBIP_GLAS_OUT_SW_RST 116 +#define MT8183_INFRACFG_AO_UFS_AES_SW_RST 117 +#define MT8183_INFRACFG_AO_CCU_I2C_IRQ_SW_RST 118 +#define MT8183_INFRACFG_AO_CCU_I2C_DMA_SW_RST 119 +#define MT8183_INFRACFG_AO_I2C6_SW_RST 120 +#define MT8183_INFRACFG_AO_CCU_GALS_SW_RST 121 +#define MT8183_INFRACFG_AO_IPU_GALS_SW_RST 122 +#define MT8183_INFRACFG_AO_CONN2AP_GALS_SW_RST 123 +#define MT8183_INFRACFG_AO_AP_MD_CCIF2_SW_RST 124 +#define MT8183_INFRACFG_AO_AP_MD_CCIF3_SW_RST 125 +#define MT8183_INFRACFG_AO_I2C7_SW_RST 126 +#define MT8183_INFRACFG_AO_I2C8_SW_RST 127 + +#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8183 */ diff --git a/include/dt-bindings/reset/sun8i-v3s-ccu.h b/include/dt-bindings/reset/sun8i-v3s-ccu.h index b58ef21a2e18d6..b6790173afd63f 100644 --- a/include/dt-bindings/reset/sun8i-v3s-ccu.h +++ b/include/dt-bindings/reset/sun8i-v3s-ccu.h @@ -75,4 +75,7 @@ #define RST_BUS_UART1 50 #define RST_BUS_UART2 51 +/* Reset lines not available on V3s */ +#define RST_BUS_I2S0 52 + #endif /* _DT_BINDINGS_RST_SUN8I_H3_H_ */ diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index dce5521a9bf6e1..2fdfe806136350 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -299,7 +299,8 @@ struct clk_init_data { * into the clk API * * @init: pointer to struct clk_init_data that contains the init data shared - * with the common clock framework. + * with the common clock framework. This pointer will be set to NULL once + * a clk_register() variant is called on this clk_hw pointer. 
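 *
 * A minimal sketch of a hypothetical driver, showing why the init data may
 * even live on the stack: neither the core nor the driver may use @init
 * after registration returns.
 *
 *	struct clk_init_data init = { .name = "foo_clk", .ops = &foo_clk_ops };
 *	struct foo_clk *foo;
 *	int ret;
 *
 *	foo = devm_kzalloc(dev, sizeof(*foo), GFP_KERNEL);
 *	if (!foo)
 *		return -ENOMEM;
 *	foo->hw.init = &init;
 *	ret = devm_clk_hw_register(dev, &foo->hw);
 *
 * after which foo->hw.init is NULL and must not be dereferenced again.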
*/ struct clk_hw { struct clk_core *core; diff --git a/include/linux/clk.h b/include/linux/clk.h index 853a8f1813947c..18b7b95a8253c7 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -239,7 +239,8 @@ static inline int clk_prepare(struct clk *clk) return 0; } -static inline int __must_check clk_bulk_prepare(int num_clks, struct clk_bulk_data *clks) +static inline int __must_check +clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks) { might_sleep(); return 0; @@ -263,7 +264,8 @@ static inline void clk_unprepare(struct clk *clk) { might_sleep(); } -static inline void clk_bulk_unprepare(int num_clks, struct clk_bulk_data *clks) +static inline void clk_bulk_unprepare(int num_clks, + const struct clk_bulk_data *clks) { might_sleep(); } @@ -820,7 +822,8 @@ static inline int clk_enable(struct clk *clk) return 0; } -static inline int __must_check clk_bulk_enable(int num_clks, struct clk_bulk_data *clks) +static inline int __must_check clk_bulk_enable(int num_clks, + const struct clk_bulk_data *clks) { return 0; } @@ -829,7 +832,7 @@ static inline void clk_disable(struct clk *clk) {} static inline void clk_bulk_disable(int num_clks, - struct clk_bulk_data *clks) {} + const struct clk_bulk_data *clks) {} static inline unsigned long clk_get_rate(struct clk *clk) { @@ -918,8 +921,8 @@ static inline void clk_disable_unprepare(struct clk *clk) clk_unprepare(clk); } -static inline int __must_check clk_bulk_prepare_enable(int num_clks, - struct clk_bulk_data *clks) +static inline int __must_check +clk_bulk_prepare_enable(int num_clks, const struct clk_bulk_data *clks) { int ret; @@ -934,7 +937,7 @@ static inline int __must_check clk_bulk_prepare_enable(int num_clks, } static inline void clk_bulk_disable_unprepare(int num_clks, - struct clk_bulk_data *clks) + const struct clk_bulk_data *clks) { clk_bulk_disable(num_clks, clks); clk_bulk_unprepare(num_clks, clks); diff --git a/include/linux/clk/clk-conf.h b/include/linux/clk/clk-conf.h index 85f8cf9d12261d..eae9652c70cd25 100644 --- a/include/linux/clk/clk-conf.h +++ b/include/linux/clk/clk-conf.h @@ -4,6 +4,9 @@ * Sylwester Nawrocki */ +#ifndef __CLK_CONF_H +#define __CLK_CONF_H + #include struct device_node; @@ -17,3 +20,5 @@ static inline int of_clk_set_defaults(struct device_node *node, return 0; } #endif + +#endif /* __CLK_CONF_H */ diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 599c27b56c29a9..b056a40116da99 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -130,10 +130,6 @@ struct ftrace_likely_data { /* * Force always-inline if the user requests it so via the .config. - * GCC does not warn about unused static inline functions for - * -Wunused-function. This turns out to avoid the need for complex #ifdef - * directives. Suppress the warning in clang as well by using "unused" - * function attribute, which is redundant but not harmful for gcc. * Prefer gnu_inline, so that extern inline functions do not emit an * externally visible function. This makes extern inline behave as per gnu89 * semantics rather than c99. 
This prevents multiple symbol definition errors @@ -144,15 +140,27 @@ struct ftrace_likely_data { */ #if !defined(CONFIG_OPTIMIZE_INLINING) #define inline inline __attribute__((__always_inline__)) __gnu_inline \ - __maybe_unused notrace + __inline_maybe_unused notrace #else #define inline inline __gnu_inline \ - __maybe_unused notrace + __inline_maybe_unused notrace #endif #define __inline__ inline #define __inline inline +/* + * GCC does not warn about unused static inline functions for -Wunused-function. + * Suppress the warning in clang as well by using __maybe_unused, but enable it + * for W=1 build. This will allow clang to find unused functions. Remove the + * __inline_maybe_unused entirely after fixing most of -Wunused-function warnings. + */ +#ifdef KBUILD_EXTRA_WARN1 +#define __inline_maybe_unused +#else +#define __inline_maybe_unused __maybe_unused +#endif + /* * Rather then using noinline to prevent stack consumption, use * noinline_for_stack instead. For documentation reasons. diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index f774c5eb9e3ccb..4664fc1871de68 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -115,4 +115,18 @@ static inline int vmcore_add_device_dump(struct vmcoredd_data *data) return -EOPNOTSUPP; } #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */ + +#ifdef CONFIG_PROC_VMCORE +ssize_t read_from_oldmem(char *buf, size_t count, + u64 *ppos, int userbuf, + bool encrypted); +#else +static inline ssize_t read_from_oldmem(char *buf, size_t count, + u64 *ppos, int userbuf, + bool encrypted) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_PROC_VMCORE */ + #endif /* LINUX_CRASHDUMP_H */ diff --git a/include/linux/export.h b/include/linux/export.h index fd8711ed9ac41d..7d8c112a8b61b6 100644 --- a/include/linux/export.h +++ b/include/linux/export.h @@ -18,9 +18,6 @@ extern struct module __this_module; #define THIS_MODULE ((struct module *)0) #endif -#ifdef CONFIG_MODULES - -#if defined(__KERNEL__) && !defined(__GENKSYMS__) #ifdef CONFIG_MODVERSIONS /* Mark the CRC weak since genksyms apparently decides not to * generate a checksums for some symbols */ @@ -74,6 +71,12 @@ struct kernel_symbol { }; #endif +#ifdef __GENKSYMS__ + +#define ___EXPORT_SYMBOL(sym, sec) __GENKSYMS_EXPORT_SYMBOL(sym) + +#else + /* For every exported symbol, place a struct in the __ksymtab section */ #define ___EXPORT_SYMBOL(sym, sec) \ extern typeof(sym) sym; \ @@ -83,7 +86,9 @@ struct kernel_symbol { = #sym; \ __KSYMTAB_ENTRY(sym, sec) -#if defined(__DISABLE_EXPORTS) +#endif + +#if !defined(CONFIG_MODULES) || defined(__DISABLE_EXPORTS) /* * Allow symbol exports to be disabled completely so that C code may @@ -117,37 +122,22 @@ struct kernel_symbol { #define __cond_export_sym_0(sym, sec) /* nothing */ #else -#define __EXPORT_SYMBOL ___EXPORT_SYMBOL -#endif -#define EXPORT_SYMBOL(sym) \ - __EXPORT_SYMBOL(sym, "") +#define __EXPORT_SYMBOL(sym, sec) ___EXPORT_SYMBOL(sym, sec) -#define EXPORT_SYMBOL_GPL(sym) \ - __EXPORT_SYMBOL(sym, "_gpl") - -#define EXPORT_SYMBOL_GPL_FUTURE(sym) \ - __EXPORT_SYMBOL(sym, "_gpl_future") +#endif /* CONFIG_MODULES */ +#define EXPORT_SYMBOL(sym) __EXPORT_SYMBOL(sym, "") +#define EXPORT_SYMBOL_GPL(sym) __EXPORT_SYMBOL(sym, "_gpl") +#define EXPORT_SYMBOL_GPL_FUTURE(sym) __EXPORT_SYMBOL(sym, "_gpl_future") #ifdef CONFIG_UNUSED_SYMBOLS -#define EXPORT_UNUSED_SYMBOL(sym) __EXPORT_SYMBOL(sym, "_unused") -#define EXPORT_UNUSED_SYMBOL_GPL(sym) __EXPORT_SYMBOL(sym, "_unused_gpl") +#define EXPORT_UNUSED_SYMBOL(sym) __EXPORT_SYMBOL(sym, 
"_unused") +#define EXPORT_UNUSED_SYMBOL_GPL(sym) __EXPORT_SYMBOL(sym, "_unused_gpl") #else #define EXPORT_UNUSED_SYMBOL(sym) #define EXPORT_UNUSED_SYMBOL_GPL(sym) #endif -#endif /* __GENKSYMS__ */ - -#else /* !CONFIG_MODULES... */ - -#define EXPORT_SYMBOL(sym) -#define EXPORT_SYMBOL_GPL(sym) -#define EXPORT_SYMBOL_GPL_FUTURE(sym) -#define EXPORT_UNUSED_SYMBOL(sym) -#define EXPORT_UNUSED_SYMBOL_GPL(sym) - -#endif /* CONFIG_MODULES */ #endif /* !__ASSEMBLY__ */ #endif /* _LINUX_EXPORT_H */ diff --git a/include/linux/extable.h b/include/linux/extable.h index 41c5b3a25f67b5..81ecfaa83ad345 100644 --- a/include/linux/extable.h +++ b/include/linux/extable.h @@ -19,6 +19,8 @@ void trim_init_extable(struct module *m); /* Given an address, look for it in the exception tables */ const struct exception_table_entry *search_exception_tables(unsigned long add); +const struct exception_table_entry * +search_kernel_exception_table(unsigned long addr); #ifdef CONFIG_MODULES /* For extable.c to search modules' exception tables. */ diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h index 470bd53a89df23..5c4a18a91f8985 100644 --- a/include/linux/mem_encrypt.h +++ b/include/linux/mem_encrypt.h @@ -18,23 +18,10 @@ #else /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */ -#define sme_me_mask 0ULL - -static inline bool sme_active(void) { return false; } -static inline bool sev_active(void) { return false; } +static inline bool mem_encrypt_active(void) { return false; } #endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */ -static inline bool mem_encrypt_active(void) -{ - return sme_me_mask; -} - -static inline u64 sme_get_me_mask(void) -{ - return sme_me_mask; -} - #ifdef CONFIG_AMD_MEM_ENCRYPT /* * The __sme_set() and __sme_clr() macros are useful for adding or removing diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h index 0c587d4fc718bc..b5b7a3423ca816 100644 --- a/include/linux/platform_data/ti-sysc.h +++ b/include/linux/platform_data/ti-sysc.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + #ifndef __TI_SYSC_DATA_H__ #define __TI_SYSC_DATA_H__ @@ -47,6 +49,7 @@ struct sysc_regbits { s8 emufree_shift; }; +#define SYSC_MODULE_QUIRK_SGX BIT(18) #define SYSC_MODULE_QUIRK_HDQ1W BIT(17) #define SYSC_MODULE_QUIRK_I2C BIT(16) #define SYSC_MODULE_QUIRK_WDT BIT(15) @@ -70,7 +73,7 @@ struct sysc_regbits { /** * struct sysc_capabilities - capabilities for an interconnect target module - * + * @type: sysc type identifier for the module * @sysc_mask: bitmask of supported SYSCONFIG register bits * @regbits: bitmask of SYSCONFIG register bits * @mod_quirks: bitmask of module specific quirks @@ -85,8 +88,9 @@ struct sysc_capabilities { /** * struct sysc_config - configuration for an interconnect target module * @sysc_val: configured value for sysc register + * @syss_mask: configured mask value for SYSSTATUS register * @midlemodes: bitmask of supported master idle modes - * @sidlemodes: bitmask of supported master idle modes + * @sidlemodes: bitmask of supported slave idle modes * @srst_udelay: optional delay needed after OCP soft reset * @quirks: bitmask of enabled quirks */ diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h index bae807eb2933f9..9aced11e90000c 100644 --- a/include/linux/vermagic.h +++ b/include/linux/vermagic.h @@ -9,6 +9,8 @@ #endif #ifdef CONFIG_PREEMPT #define MODULE_VERMAGIC_PREEMPT "preempt " +#elif defined(CONFIG_PREEMPT_RT) +#define MODULE_VERMAGIC_PREEMPT "preempt_rt " #else #define MODULE_VERMAGIC_PREEMPT "" #endif diff --git 
a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 8f10748dac79a1..9e843a147ead0a 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -295,15 +295,38 @@ struct vfio_region_info_cap_type { __u32 subtype; /* type specific */ }; +/* + * List of region types, global per bus driver. + * If you introduce a new type, please add it here. + */ + +/* PCI region type containing a PCI vendor part */ #define VFIO_REGION_TYPE_PCI_VENDOR_TYPE (1 << 31) #define VFIO_REGION_TYPE_PCI_VENDOR_MASK (0xffff) +#define VFIO_REGION_TYPE_GFX (1) +#define VFIO_REGION_TYPE_CCW (2) + +/* sub-types for VFIO_REGION_TYPE_PCI_* */ -/* 8086 Vendor sub-types */ +/* 8086 vendor PCI sub-types */ #define VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION (1) #define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG (2) #define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG (3) -#define VFIO_REGION_TYPE_GFX (1) +/* 10de vendor PCI sub-types */ +/* + * NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space. + */ +#define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM (1) + +/* 1014 vendor PCI sub-types */ +/* + * IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU + * to do TLB invalidation on a GPU. + */ +#define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD (1) + +/* sub-types for VFIO_REGION_TYPE_GFX */ #define VFIO_REGION_SUBTYPE_GFX_EDID (1) /** @@ -353,25 +376,9 @@ struct vfio_region_gfx_edid { #define VFIO_DEVICE_GFX_LINK_STATE_DOWN 2 }; -#define VFIO_REGION_TYPE_CCW (2) -/* ccw sub-types */ +/* sub-types for VFIO_REGION_TYPE_CCW */ #define VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD (1) -/* - * 10de vendor sub-type - * - * NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space. - */ -#define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM (1) - -/* - * 1014 vendor sub-type - * - * IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU - * to do TLB invalidation on a GPU. - */ -#define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD (1) - /* * The MSIX mappable capability informs that MSIX data of a BAR can be mmapped * which allows direct access to non-MSIX registers which happened to be within @@ -714,7 +721,31 @@ struct vfio_iommu_type1_info { __u32 argsz; __u32 flags; #define VFIO_IOMMU_INFO_PGSIZES (1 << 0) /* supported page sizes info */ - __u64 iova_pgsizes; /* Bitmap of supported page sizes */ +#define VFIO_IOMMU_INFO_CAPS (1 << 1) /* Info supports caps */ + __u64 iova_pgsizes; /* Bitmap of supported page sizes */ + __u32 cap_offset; /* Offset within info struct of first cap */ +}; + +/* + * The IOVA capability allows to report the valid IOVA range(s) + * excluding any non-relaxable reserved regions exposed by + * devices attached to the container. Any DMA map attempt + * outside the valid iova range will return error. + * + * The structures below define version 1 of this capability. 
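 *
 * A rough userspace sketch of locating this capability (argsz reallocation
 * and error handling omitted; "container" is an assumed VFIO container fd):
 *
 *	struct vfio_iommu_type1_info *info;	(allocated to info->argsz)
 *	struct vfio_info_cap_header *hdr;
 *	struct vfio_iommu_type1_info_cap_iova_range *cap;
 *	__u32 off;
 *
 *	ioctl(container, VFIO_IOMMU_GET_INFO, info);
 *	if (info->flags & VFIO_IOMMU_INFO_CAPS) {
 *		for (off = info->cap_offset; off; off = hdr->next) {
 *			hdr = (void *)((char *)info + off);
 *			if (hdr->id != VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE)
 *				continue;
 *			cap = (void *)hdr;
 *			(cap->iova_ranges[0..nr_iovas-1] hold the valid ranges)
 *			break;
 *		}
 *	}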
+ */ +#define VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE 1 + +struct vfio_iova_range { + __u64 start; + __u64 end; +}; + +struct vfio_iommu_type1_info_cap_iova_range { + struct vfio_info_cap_header header; + __u32 nr_iovas; + __u32 reserved; + struct vfio_iova_range iova_ranges[]; }; #define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12) diff --git a/init/Kconfig b/init/Kconfig index ec1021fd33712a..f4534c58342d7d 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1234,20 +1234,26 @@ choice default CC_OPTIMIZE_FOR_PERFORMANCE config CC_OPTIMIZE_FOR_PERFORMANCE - bool "Optimize for performance" + bool "Optimize for performance (-O2)" help This is the default optimization level for the kernel, building with the "-O2" compiler flag for best performance and most helpful compile-time warnings. -config CC_OPTIMIZE_FOR_SIZE - bool "Optimize for size" +config CC_OPTIMIZE_FOR_PERFORMANCE_O3 + bool "Optimize more for performance (-O3)" + depends on ARC imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives help - Enabling this option will pass "-Os" instead of "-O2" to - your compiler resulting in a smaller kernel. + Choosing this option will pass "-O3" to your compiler to optimize + the kernel yet more for performance. - If unsure, say N. +config CC_OPTIMIZE_FOR_SIZE + bool "Optimize for size (-Os)" + imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives + help + Choosing this option will pass "-Os" to your compiler resulting + in a smaller kernel. endchoice @@ -2014,6 +2020,14 @@ config MODVERSIONS make them incompatible with the kernel you are running. If unsure, say N. +config ASM_MODVERSIONS + bool + default HAVE_ASM_MODVERSIONS && MODVERSIONS + help + This enables module versioning for exported symbols also from + assembly. This can be enabled only when the target architecture + supports it. + config MODULE_REL_CRCS bool depends on MODVERSIONS diff --git a/init/Makefile b/init/Makefile index a3e5ce2bcf08bf..6246a06364d0f1 100644 --- a/init/Makefile +++ b/init/Makefile @@ -33,5 +33,6 @@ $(obj)/version.o: include/generated/compile.h silent_chk_compile.h = : include/generated/compile.h: FORCE @$($(quiet)chk_compile.h) - $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \ - "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)" + $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \ + "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" \ + "$(CONFIG_PREEMPT_RT)" "$(CC) $(KBUILD_CFLAGS)" diff --git a/kernel/Makefile b/kernel/Makefile index 48c5376d290a64..25f9d83d1bbf5d 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -127,7 +127,7 @@ $(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE $(obj)/kheaders.o: $(obj)/kheaders_data.tar.xz quiet_cmd_genikh = CHK $(obj)/kheaders_data.tar.xz -cmd_genikh = $(CONFIG_SHELL) $(srctree)/kernel/gen_kheaders.sh $@ + cmd_genikh = $(BASH) $(srctree)/kernel/gen_kheaders.sh $@ $(obj)/kheaders_data.tar.xz: FORCE $(call cmd,genikh) diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index 5cc608de688312..10f1187b39077e 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -787,11 +787,8 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs) } /* - * GDB places a breakpoint at this function to know dynamically - * loaded objects. It's not defined static so that only one instance with this - * name exists in the kernel. + * GDB places a breakpoint at this function to know dynamically loaded objects. 
*/ - static int module_event(struct notifier_block *self, unsigned long val, void *data) { diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 9ecfa37c7fbfc2..4567fe998c3067 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -830,7 +830,7 @@ static void parse_grep(const char *str) cp++; while (isspace(*cp)) cp++; - if (strncmp(cp, "grep ", 5)) { + if (!str_has_prefix(cp, "grep ")) { kdb_printf("invalid 'pipe', see grephelp\n"); return; } diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index 64a3d294f4b457..d9334f31a5afb0 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -345,12 +345,6 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, } EXPORT_SYMBOL(dma_free_attrs); -static inline void dma_check_mask(struct device *dev, u64 mask) -{ - if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1))) - dev_warn(dev, "SME is active, device will require DMA bounce buffers\n"); -} - int dma_supported(struct device *dev, u64 mask) { const struct dma_map_ops *ops = get_dma_ops(dev); @@ -381,7 +375,6 @@ int dma_set_mask(struct device *dev, u64 mask) return -EIO; arch_dma_set_mask(dev, mask); - dma_check_mask(dev, mask); *dev->dma_mask = mask; return 0; } @@ -399,7 +392,6 @@ int dma_set_coherent_mask(struct device *dev, u64 mask) if (!dma_supported(dev, mask)) return -EIO; - dma_check_mask(dev, mask); dev->coherent_dma_mask = mask; return 0; } diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 796a44f8ef5a90..673a2cdb2656b0 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -463,8 +463,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer"); if (mem_encrypt_active()) - pr_warn_once("%s is active and system is using DMA bounce buffers\n", - sme_active() ? "SME" : "SEV"); + pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n"); if (mapping_size > alloc_size) { dev_warn_once(hwdev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)", diff --git a/kernel/extable.c b/kernel/extable.c index e23cce6e6092af..f6c9406eec7db9 100644 --- a/kernel/extable.c +++ b/kernel/extable.c @@ -40,13 +40,20 @@ void __init sort_main_extable(void) } } +/* Given an address, look for it in the kernel exception table */ +const +struct exception_table_entry *search_kernel_exception_table(unsigned long addr) +{ + return search_extable(__start___ex_table, + __stop___ex_table - __start___ex_table, addr); +} + /* Given an address, look for it in the exception tables. 
*/ const struct exception_table_entry *search_exception_tables(unsigned long addr) { const struct exception_table_entry *e; - e = search_extable(__start___ex_table, - __stop___ex_table - __start___ex_table, addr); + e = search_kernel_exception_table(addr); if (!e) e = search_module_extables(addr); return e; diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 1b66ccbb744a6a..53534aa258a60e 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -961,9 +961,16 @@ static struct kprobe *alloc_aggr_kprobe(struct kprobe *p) #ifdef CONFIG_KPROBES_ON_FTRACE static struct ftrace_ops kprobe_ftrace_ops __read_mostly = { + .func = kprobe_ftrace_handler, + .flags = FTRACE_OPS_FL_SAVE_REGS, +}; + +static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = { .func = kprobe_ftrace_handler, .flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY, }; + +static int kprobe_ipmodify_enabled; static int kprobe_ftrace_enabled; /* Must ensure p->addr is really on ftrace */ @@ -976,58 +983,75 @@ static int prepare_kprobe(struct kprobe *p) } /* Caller must lock kprobe_mutex */ -static int arm_kprobe_ftrace(struct kprobe *p) +static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops, + int *cnt) { int ret = 0; - ret = ftrace_set_filter_ip(&kprobe_ftrace_ops, - (unsigned long)p->addr, 0, 0); + ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0); if (ret) { pr_debug("Failed to arm kprobe-ftrace at %pS (%d)\n", p->addr, ret); return ret; } - if (kprobe_ftrace_enabled == 0) { - ret = register_ftrace_function(&kprobe_ftrace_ops); + if (*cnt == 0) { + ret = register_ftrace_function(ops); if (ret) { pr_debug("Failed to init kprobe-ftrace (%d)\n", ret); goto err_ftrace; } } - kprobe_ftrace_enabled++; + (*cnt)++; return ret; err_ftrace: /* - * Note: Since kprobe_ftrace_ops has IPMODIFY set, and ftrace requires a - * non-empty filter_hash for IPMODIFY ops, we're safe from an accidental - * empty filter_hash which would undesirably trace all functions. + * At this point, sinec ops is not registered, we should be sefe from + * registering empty filter. */ - ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0); + ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0); return ret; } +static int arm_kprobe_ftrace(struct kprobe *p) +{ + bool ipmodify = (p->post_handler != NULL); + + return __arm_kprobe_ftrace(p, + ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops, + ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled); +} + /* Caller must lock kprobe_mutex */ -static int disarm_kprobe_ftrace(struct kprobe *p) +static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops, + int *cnt) { int ret = 0; - if (kprobe_ftrace_enabled == 1) { - ret = unregister_ftrace_function(&kprobe_ftrace_ops); + if (*cnt == 1) { + ret = unregister_ftrace_function(ops); if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret)) return ret; } - kprobe_ftrace_enabled--; + (*cnt)--; - ret = ftrace_set_filter_ip(&kprobe_ftrace_ops, - (unsigned long)p->addr, 1, 0); + ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0); WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n", p->addr, ret); return ret; } + +static int disarm_kprobe_ftrace(struct kprobe *p) +{ + bool ipmodify = (p->post_handler != NULL); + + return __disarm_kprobe_ftrace(p, + ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops, + ipmodify ? 
&kprobe_ipmodify_enabled : &kprobe_ftrace_enabled); +} #else /* !CONFIG_KPROBES_ON_FTRACE */ #define prepare_kprobe(p) arch_prepare_kprobe(p) #define arm_kprobe_ftrace(p) (-ENODEV) diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c index 8dfd5021b9339a..7950a0356042a3 100644 --- a/kernel/trace/fgraph.c +++ b/kernel/trace/fgraph.c @@ -276,7 +276,7 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, int index = task->curr_ret_stack; int i; - if (ret != (unsigned long)return_to_handler) + if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler)) return ret; if (index < 0) @@ -294,7 +294,7 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, { int task_idx; - if (ret != (unsigned long)return_to_handler) + if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler)) return ret; task_idx = task->curr_ret_stack; diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 356b848c697aa7..62a50bf399d636 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -6036,11 +6036,7 @@ clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash) { struct ftrace_func_entry *entry; - if (ftrace_hash_empty(hash)) - return; - - entry = __ftrace_lookup_ip(hash, func->ip); - + entry = ftrace_lookup_ip(hash, func->ip); /* * Do not allow this rec to match again. * Yeah, it may waste some memory, but will be removed diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 947ba433865f41..252f79c435f810 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1854,7 +1854,7 @@ int __init register_tracer(struct tracer *type) return ret; } -void tracing_reset(struct trace_buffer *buf, int cpu) +static void tracing_reset_cpu(struct trace_buffer *buf, int cpu) { struct ring_buffer *buffer = buf->buffer; @@ -4251,7 +4251,7 @@ static int tracing_open(struct inode *inode, struct file *file) if (cpu == RING_BUFFER_ALL_CPUS) tracing_reset_online_cpus(trace_buf); else - tracing_reset(trace_buf, cpu); + tracing_reset_cpu(trace_buf, cpu); } if (file->f_mode & FMODE_READ) { @@ -4815,15 +4815,15 @@ static const char readme_msg[] = #endif #endif /* CONFIG_STACK_TRACER */ #ifdef CONFIG_DYNAMIC_EVENTS - " dynamic_events\t\t- Add/remove/show the generic dynamic events\n" + " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n" "\t\t\t Write into this file to define/undefine new trace events.\n" #endif #ifdef CONFIG_KPROBE_EVENTS - " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n" + " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n" "\t\t\t Write into this file to define/undefine new trace events.\n" #endif #ifdef CONFIG_UPROBE_EVENTS - " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n" + " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n" "\t\t\t Write into this file to define/undefine new trace events.\n" #endif #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) @@ -4848,7 +4848,7 @@ static const char readme_msg[] = #else "\t $stack, $stack, $retval, $comm,\n" #endif - "\t +|-[u]()\n" + "\t +|-[u](), \\imm-value, \\\"imm-string\"\n" "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n" "\t b@/, ustring,\n" "\t \\[\\]\n" @@ -6742,7 +6742,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, if (iter->cpu_file == RING_BUFFER_ALL_CPUS) tracing_reset_online_cpus(&tr->max_buffer); else - tracing_reset(&tr->max_buffer, 
iter->cpu_file); + tracing_reset_cpu(&tr->max_buffer, iter->cpu_file); } break; } diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 005f08629b8bed..26b0a08f3c7d46 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -677,7 +677,6 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu) int tracer_init(struct tracer *t, struct trace_array *tr); int tracing_is_enabled(void); -void tracing_reset(struct trace_buffer *buf, int cpu); void tracing_reset_online_cpus(struct trace_buffer *buf); void tracing_reset_current(int cpu); void tracing_reset_all_online_cpus(void); diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c index fa100ed3b4de9d..a41fed46c285c3 100644 --- a/kernel/trace/trace_dynevent.c +++ b/kernel/trace/trace_dynevent.c @@ -47,6 +47,7 @@ int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type) return -EINVAL; event++; } + argc--; argv++; p = strchr(event, '/'); if (p) { @@ -61,10 +62,13 @@ int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type) for_each_dyn_event_safe(pos, n) { if (type && type != pos->ops) continue; - if (pos->ops->match(system, event, pos)) { - ret = pos->ops->free(pos); + if (!pos->ops->match(system, event, + argc, (const char **)argv, pos)) + continue; + + ret = pos->ops->free(pos); + if (ret) break; - } } mutex_unlock(&event_mutex); diff --git a/kernel/trace/trace_dynevent.h b/kernel/trace/trace_dynevent.h index 8c334064e4d684..46898138d2dfd4 100644 --- a/kernel/trace/trace_dynevent.h +++ b/kernel/trace/trace_dynevent.h @@ -31,8 +31,9 @@ struct dyn_event; * @is_busy: Check whether given event is busy so that it can not be deleted. * Return true if it is busy, otherwides false. * @free: Delete the given event. Return 0 if success, otherwides error. - * @match: Check whether given event and system name match this event. - * Return true if it matches, otherwides false. + * @match: Check whether given event and system name match this event. The argc + * and argv is used for exact match. Return true if it matches, otherwides + * false. * * Except for @create, these methods are called under holding event_mutex. 
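 *
 * A sketch of what a @match implementation looks like with the new argc/argv
 * parameters (hypothetical "foo" event type; see trace_kprobe.c and
 * trace_events_hist.c for the real ones):
 *
 *	static bool foo_event_match(const char *system, const char *event,
 *				    int argc, const char **argv,
 *				    struct dyn_event *ev)
 *	{
 *		struct foo_event *fev = to_foo_event(ev);
 *
 *		if (strcmp(fev->event, event) ||
 *		    (system && strcmp(fev->system, system)))
 *			return false;
 *		(no extra command words means "match by name only")
 *		return !argc || foo_event_match_args(fev, argc, argv);
 *	}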
*/ @@ -43,7 +44,7 @@ struct dyn_event_operations { bool (*is_busy)(struct dyn_event *ev); int (*free)(struct dyn_event *ev); bool (*match)(const char *system, const char *event, - struct dyn_event *ev); + int argc, const char **argv, struct dyn_event *ev); }; /* Register new dyn_event type -- must be called at first */ diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index ca6b0dff60c5be..9468bd8d44a2d6 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -13,6 +13,10 @@ #include #include +/* for gfp flag names */ +#include +#include + #include "tracing_map.h" #include "trace.h" #include "trace_dynevent.h" @@ -374,7 +378,7 @@ static int synth_event_show(struct seq_file *m, struct dyn_event *ev); static int synth_event_release(struct dyn_event *ev); static bool synth_event_is_busy(struct dyn_event *ev); static bool synth_event_match(const char *system, const char *event, - struct dyn_event *ev); + int argc, const char **argv, struct dyn_event *ev); static struct dyn_event_operations synth_event_ops = { .create = synth_event_create, @@ -422,7 +426,7 @@ static bool synth_event_is_busy(struct dyn_event *ev) } static bool synth_event_match(const char *system, const char *event, - struct dyn_event *ev) + int argc, const char **argv, struct dyn_event *ev) { struct synth_event *sev = to_synth_event(ev); @@ -752,6 +756,8 @@ static int synth_field_size(char *type) size = sizeof(unsigned long); else if (strcmp(type, "pid_t") == 0) size = sizeof(pid_t); + else if (strcmp(type, "gfp_t") == 0) + size = sizeof(gfp_t); else if (synth_field_is_string(type)) size = synth_field_string_size(type); @@ -792,6 +798,8 @@ static const char *synth_field_fmt(char *type) fmt = "%lu"; else if (strcmp(type, "pid_t") == 0) fmt = "%d"; + else if (strcmp(type, "gfp_t") == 0) + fmt = "%x"; else if (synth_field_is_string(type)) fmt = "%s"; @@ -834,9 +842,20 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter, i == se->n_fields - 1 ? "" : " "); n_u64 += STR_VAR_LEN_MAX / sizeof(u64); } else { + struct trace_print_flags __flags[] = { + __def_gfpflag_names, {-1, NULL} }; + trace_seq_printf(s, print_fmt, se->fields[i]->name, entry->fields[n_u64], i == se->n_fields - 1 ? 
"" : " "); + + if (strcmp(se->fields[i]->type, "gfp_t") == 0) { + trace_seq_puts(s, " ("); + trace_print_flags_seq(s, "|", + entry->fields[n_u64], + __flags); + trace_seq_putc(s, ')'); + } n_u64++; } } @@ -2785,6 +2804,8 @@ static struct hist_field *create_alias(struct hist_trigger_data *hist_data, return NULL; } + alias->var_ref_idx = var_ref->var_ref_idx; + return alias; } diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 9d483ad9bb6c40..a6697e28ddda00 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -39,7 +39,7 @@ static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev); static int trace_kprobe_release(struct dyn_event *ev); static bool trace_kprobe_is_busy(struct dyn_event *ev); static bool trace_kprobe_match(const char *system, const char *event, - struct dyn_event *ev); + int argc, const char **argv, struct dyn_event *ev); static struct dyn_event_operations trace_kprobe_ops = { .create = trace_kprobe_create, @@ -137,13 +137,36 @@ static bool trace_kprobe_is_busy(struct dyn_event *ev) return trace_probe_is_enabled(&tk->tp); } +static bool trace_kprobe_match_command_head(struct trace_kprobe *tk, + int argc, const char **argv) +{ + char buf[MAX_ARGSTR_LEN + 1]; + + if (!argc) + return true; + + if (!tk->symbol) + snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr); + else if (tk->rp.kp.offset) + snprintf(buf, sizeof(buf), "%s+%u", + trace_kprobe_symbol(tk), tk->rp.kp.offset); + else + snprintf(buf, sizeof(buf), "%s", trace_kprobe_symbol(tk)); + if (strcmp(buf, argv[0])) + return false; + argc--; argv++; + + return trace_probe_match_command_args(&tk->tp, argc, argv); +} + static bool trace_kprobe_match(const char *system, const char *event, - struct dyn_event *ev) + int argc, const char **argv, struct dyn_event *ev) { struct trace_kprobe *tk = to_trace_kprobe(ev); return strcmp(trace_probe_name(&tk->tp), event) == 0 && - (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0); + (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) && + trace_kprobe_match_command_head(tk, argc, argv); } static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk) @@ -180,20 +203,33 @@ unsigned long trace_kprobe_address(struct trace_kprobe *tk) return addr; } +static nokprobe_inline struct trace_kprobe * +trace_kprobe_primary_from_call(struct trace_event_call *call) +{ + struct trace_probe *tp; + + tp = trace_probe_primary_from_call(call); + if (WARN_ON_ONCE(!tp)) + return NULL; + + return container_of(tp, struct trace_kprobe, tp); +} + bool trace_kprobe_on_func_entry(struct trace_event_call *call) { - struct trace_kprobe *tk = (struct trace_kprobe *)call->data; + struct trace_kprobe *tk = trace_kprobe_primary_from_call(call); - return kprobe_on_func_entry(tk->rp.kp.addr, + return tk ? kprobe_on_func_entry(tk->rp.kp.addr, tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name, - tk->rp.kp.addr ? 0 : tk->rp.kp.offset); + tk->rp.kp.addr ? 0 : tk->rp.kp.offset) : false; } bool trace_kprobe_error_injectable(struct trace_event_call *call) { - struct trace_kprobe *tk = (struct trace_kprobe *)call->data; + struct trace_kprobe *tk = trace_kprobe_primary_from_call(call); - return within_error_injection_list(trace_kprobe_address(tk)); + return tk ? 
within_error_injection_list(trace_kprobe_address(tk)) : + false; } static int register_kprobe_event(struct trace_kprobe *tk); @@ -291,32 +327,68 @@ static inline int __enable_trace_kprobe(struct trace_kprobe *tk) return ret; } +static void __disable_trace_kprobe(struct trace_probe *tp) +{ + struct trace_probe *pos; + struct trace_kprobe *tk; + + list_for_each_entry(pos, trace_probe_probe_list(tp), list) { + tk = container_of(pos, struct trace_kprobe, tp); + if (!trace_kprobe_is_registered(tk)) + continue; + if (trace_kprobe_is_return(tk)) + disable_kretprobe(&tk->rp); + else + disable_kprobe(&tk->rp.kp); + } +} + /* * Enable trace_probe * if the file is NULL, enable "perf" handler, or enable "trace" handler. */ -static int -enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file) +static int enable_trace_kprobe(struct trace_event_call *call, + struct trace_event_file *file) { - bool enabled = trace_probe_is_enabled(&tk->tp); + struct trace_probe *pos, *tp; + struct trace_kprobe *tk; + bool enabled; int ret = 0; + tp = trace_probe_primary_from_call(call); + if (WARN_ON_ONCE(!tp)) + return -ENODEV; + enabled = trace_probe_is_enabled(tp); + + /* This also changes "enabled" state */ if (file) { - ret = trace_probe_add_file(&tk->tp, file); + ret = trace_probe_add_file(tp, file); if (ret) return ret; } else - trace_probe_set_flag(&tk->tp, TP_FLAG_PROFILE); + trace_probe_set_flag(tp, TP_FLAG_PROFILE); if (enabled) return 0; - ret = __enable_trace_kprobe(tk); + list_for_each_entry(pos, trace_probe_probe_list(tp), list) { + tk = container_of(pos, struct trace_kprobe, tp); + if (trace_kprobe_has_gone(tk)) + continue; + ret = __enable_trace_kprobe(tk); + if (ret) + break; + enabled = true; + } + if (ret) { + /* Failed to enable one of them. Roll back all */ + if (enabled) + __disable_trace_kprobe(tp); if (file) - trace_probe_remove_file(&tk->tp, file); + trace_probe_remove_file(tp, file); else - trace_probe_clear_flag(&tk->tp, TP_FLAG_PROFILE); + trace_probe_clear_flag(tp, TP_FLAG_PROFILE); } return ret; @@ -326,11 +398,14 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file) * Disable trace_probe * if the file is NULL, disable "perf" handler, or disable "trace" handler. 
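 *
 * Note that with the append support introduced here a single trace event may
 * be backed by several kprobes, e.g. (a sketch using the $argN fetcharg and
 * assuming both functions take the size in their third argument):
 *
 *	echo 'p:myevent vfs_read count=$arg3'  >> kprobe_events
 *	echo 'p:myevent vfs_write count=$arg3' >> kprobe_events
 *
 * so enabling or disabling the event has to walk every sibling probe on the
 * trace_probe_probe_list(), as the loops above do.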
*/ -static int -disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file) +static int disable_trace_kprobe(struct trace_event_call *call, + struct trace_event_file *file) { - struct trace_probe *tp = &tk->tp; - int ret = 0; + struct trace_probe *tp; + + tp = trace_probe_primary_from_call(call); + if (WARN_ON_ONCE(!tp)) + return -ENODEV; if (file) { if (!trace_probe_get_file_link(tp, file)) @@ -341,12 +416,8 @@ disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file) } else trace_probe_clear_flag(tp, TP_FLAG_PROFILE); - if (!trace_probe_is_enabled(tp) && trace_kprobe_is_registered(tk)) { - if (trace_kprobe_is_return(tk)) - disable_kretprobe(&tk->rp); - else - disable_kprobe(&tk->rp.kp); - } + if (!trace_probe_is_enabled(tp)) + __disable_trace_kprobe(tp); out: if (file) @@ -358,7 +429,7 @@ disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file) */ trace_probe_remove_file(tp, file); - return ret; + return 0; } #if defined(CONFIG_KPROBES_ON_FTRACE) && \ @@ -437,6 +508,10 @@ static void __unregister_trace_kprobe(struct trace_kprobe *tk) /* Unregister a trace_probe and probe_event */ static int unregister_trace_kprobe(struct trace_kprobe *tk) { + /* If other probes are on the event, just unregister kprobe */ + if (trace_probe_has_sibling(&tk->tp)) + goto unreg; + /* Enabled event can not be unregistered */ if (trace_probe_is_enabled(&tk->tp)) return -EBUSY; @@ -445,12 +520,81 @@ static int unregister_trace_kprobe(struct trace_kprobe *tk) if (unregister_kprobe_event(tk)) return -EBUSY; +unreg: __unregister_trace_kprobe(tk); dyn_event_remove(&tk->devent); + trace_probe_unlink(&tk->tp); return 0; } +static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig, + struct trace_kprobe *comp) +{ + struct trace_probe_event *tpe = orig->tp.event; + struct trace_probe *pos; + int i; + + list_for_each_entry(pos, &tpe->probes, list) { + orig = container_of(pos, struct trace_kprobe, tp); + if (strcmp(trace_kprobe_symbol(orig), + trace_kprobe_symbol(comp)) || + trace_kprobe_offset(orig) != trace_kprobe_offset(comp)) + continue; + + /* + * trace_probe_compare_arg_type() ensured that nr_args and + * each argument name and type are same. Let's compare comm. + */ + for (i = 0; i < orig->tp.nr_args; i++) { + if (strcmp(orig->tp.args[i].comm, + comp->tp.args[i].comm)) + continue; + } + + return true; + } + + return false; +} + +static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to) +{ + int ret; + + ret = trace_probe_compare_arg_type(&tk->tp, &to->tp); + if (ret) { + /* Note that argument starts index = 2 */ + trace_probe_log_set_index(ret + 1); + trace_probe_log_err(0, DIFF_ARG_TYPE); + return -EEXIST; + } + if (trace_kprobe_has_same_kprobe(to, tk)) { + trace_probe_log_set_index(0); + trace_probe_log_err(0, SAME_PROBE); + return -EEXIST; + } + + /* Append to existing event */ + ret = trace_probe_append(&tk->tp, &to->tp); + if (ret) + return ret; + + /* Register k*probe */ + ret = __register_trace_kprobe(tk); + if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) { + pr_warn("This probe might be able to register after target module is loaded. 
Continue.\n"); + ret = 0; + } + + if (ret) + trace_probe_unlink(&tk->tp); + else + dyn_event_add(&tk->devent); + + return ret; +} + /* Register a trace_probe and probe_event */ static int register_trace_kprobe(struct trace_kprobe *tk) { @@ -459,14 +603,17 @@ static int register_trace_kprobe(struct trace_kprobe *tk) mutex_lock(&event_mutex); - /* Delete old (same name) event if exist */ old_tk = find_trace_kprobe(trace_probe_name(&tk->tp), trace_probe_group_name(&tk->tp)); if (old_tk) { - ret = unregister_trace_kprobe(old_tk); - if (ret < 0) - goto end; - free_trace_kprobe(old_tk); + if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) { + trace_probe_log_set_index(0); + trace_probe_log_err(0, DIFF_PROBE_TYPE); + ret = -EEXIST; + } else { + ret = append_trace_kprobe(tk, old_tk); + } + goto end; } /* Register new event */ @@ -700,7 +847,7 @@ static int trace_kprobe_create(int argc, const char *argv[]) trace_probe_log_err(0, BAD_INSN_BNDRY); else if (ret == -ENOENT) trace_probe_log_err(0, BAD_PROBE_ADDR); - else if (ret != -ENOMEM) + else if (ret != -ENOMEM && ret != -EEXIST) trace_probe_log_err(0, FAIL_REG_PROBE); goto error; } @@ -965,6 +1112,9 @@ process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest, case FETCH_OP_COMM: val = (unsigned long)current->comm; break; + case FETCH_OP_DATA: + val = (unsigned long)code->data; + break; #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API case FETCH_OP_ARG: val = regs_get_kernel_argument(regs, code->param); @@ -1089,7 +1239,10 @@ print_kprobe_event(struct trace_iterator *iter, int flags, struct trace_probe *tp; field = (struct kprobe_trace_entry_head *)iter->ent; - tp = container_of(event, struct trace_probe, call.event); + tp = trace_probe_primary_from_call( + container_of(event, struct trace_event_call, event)); + if (WARN_ON_ONCE(!tp)) + goto out; trace_seq_printf(s, "%s: (", trace_probe_name(tp)); @@ -1116,7 +1269,10 @@ print_kretprobe_event(struct trace_iterator *iter, int flags, struct trace_probe *tp; field = (struct kretprobe_trace_entry_head *)iter->ent; - tp = container_of(event, struct trace_probe, call.event); + tp = trace_probe_primary_from_call( + container_of(event, struct trace_event_call, event)); + if (WARN_ON_ONCE(!tp)) + goto out; trace_seq_printf(s, "%s: (", trace_probe_name(tp)); @@ -1145,23 +1301,31 @@ static int kprobe_event_define_fields(struct trace_event_call *event_call) { int ret; struct kprobe_trace_entry_head field; - struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data; + struct trace_probe *tp; + + tp = trace_probe_primary_from_call(event_call); + if (WARN_ON_ONCE(!tp)) + return -ENOENT; DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); - return traceprobe_define_arg_fields(event_call, sizeof(field), &tk->tp); + return traceprobe_define_arg_fields(event_call, sizeof(field), tp); } static int kretprobe_event_define_fields(struct trace_event_call *event_call) { int ret; struct kretprobe_trace_entry_head field; - struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data; + struct trace_probe *tp; + + tp = trace_probe_primary_from_call(event_call); + if (WARN_ON_ONCE(!tp)) + return -ENOENT; DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0); DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0); - return traceprobe_define_arg_fields(event_call, sizeof(field), &tk->tp); + return traceprobe_define_arg_fields(event_call, sizeof(field), tp); } #ifdef CONFIG_PERF_EVENTS @@ -1289,20 +1453,19 @@ int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type, 
static int kprobe_register(struct trace_event_call *event, enum trace_reg type, void *data) { - struct trace_kprobe *tk = (struct trace_kprobe *)event->data; struct trace_event_file *file = data; switch (type) { case TRACE_REG_REGISTER: - return enable_trace_kprobe(tk, file); + return enable_trace_kprobe(event, file); case TRACE_REG_UNREGISTER: - return disable_trace_kprobe(tk, file); + return disable_trace_kprobe(event, file); #ifdef CONFIG_PERF_EVENTS case TRACE_REG_PERF_REGISTER: - return enable_trace_kprobe(tk, NULL); + return enable_trace_kprobe(event, NULL); case TRACE_REG_PERF_UNREGISTER: - return disable_trace_kprobe(tk, NULL); + return disable_trace_kprobe(event, NULL); case TRACE_REG_PERF_OPEN: case TRACE_REG_PERF_CLOSE: case TRACE_REG_PERF_ADD: @@ -1369,7 +1532,6 @@ static inline void init_trace_event_call(struct trace_kprobe *tk) call->flags = TRACE_EVENT_FL_KPROBE; call->class->reg = kprobe_register; - call->data = tk; } static int register_kprobe_event(struct trace_kprobe *tk) @@ -1432,7 +1594,9 @@ void destroy_local_trace_kprobe(struct trace_event_call *event_call) { struct trace_kprobe *tk; - tk = container_of(event_call, struct trace_kprobe, tp.call); + tk = trace_kprobe_primary_from_call(event_call); + if (unlikely(!tk)) + return; if (trace_probe_is_enabled(&tk->tp)) { WARN_ON(1); @@ -1577,7 +1741,8 @@ static __init int kprobe_trace_self_tests_init(void) pr_warn("error on getting probe file.\n"); warn++; } else - enable_trace_kprobe(tk, file); + enable_trace_kprobe( + trace_probe_event_call(&tk->tp), file); } } @@ -1598,7 +1763,8 @@ static __init int kprobe_trace_self_tests_init(void) pr_warn("error on getting probe file.\n"); warn++; } else - enable_trace_kprobe(tk, file); + enable_trace_kprobe( + trace_probe_event_call(&tk->tp), file); } } @@ -1631,7 +1797,8 @@ static __init int kprobe_trace_self_tests_init(void) pr_warn("error on getting probe file.\n"); warn++; } else - disable_trace_kprobe(tk, file); + disable_trace_kprobe( + trace_probe_event_call(&tk->tp), file); } tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM); @@ -1649,7 +1816,8 @@ static __init int kprobe_trace_self_tests_init(void) pr_warn("error on getting probe file.\n"); warn++; } else - disable_trace_kprobe(tk, file); + disable_trace_kprobe( + trace_probe_event_call(&tk->tp), file); } ret = trace_run_command("-:testprobe", create_or_delete_trace_kprobe); diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index cab4a5398f1d4a..d54ce252b05a84 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -219,10 +219,10 @@ trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len, { int i; const char *ret = trace_seq_buffer_ptr(p); + const char *fmt = concatenate ? "%*phN" : "%*ph"; - for (i = 0; i < buf_len; i++) - trace_seq_printf(p, "%s%2.2x", concatenate || i == 0 ? 
"" : " ", - buf[i]); + for (i = 0; i < buf_len; i += 16) + trace_seq_printf(p, fmt, min(buf_len - i, 16), &buf[i]); trace_seq_putc(p, 0); return ret; diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index fb6bfbc5bf8620..baf58a3612c070 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -316,6 +316,29 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t, return -EINVAL; } +static int str_to_immediate(char *str, unsigned long *imm) +{ + if (isdigit(str[0])) + return kstrtoul(str, 0, imm); + else if (str[0] == '-') + return kstrtol(str, 0, (long *)imm); + else if (str[0] == '+') + return kstrtol(str + 1, 0, (long *)imm); + return -EINVAL; +} + +static int __parse_imm_string(char *str, char **pbuf, int offs) +{ + size_t len = strlen(str); + + if (str[len - 1] != '"') { + trace_probe_log_err(offs + len, IMMSTR_NO_CLOSE); + return -EINVAL; + } + *pbuf = kstrndup(str, len - 1, GFP_KERNEL); + return 0; +} + /* Recursive argument parser */ static int parse_probe_arg(char *arg, const struct fetch_type *type, @@ -430,7 +453,8 @@ parse_probe_arg(char *arg, const struct fetch_type *type, ret = parse_probe_arg(arg, t2, &code, end, flags, offs); if (ret) break; - if (code->op == FETCH_OP_COMM) { + if (code->op == FETCH_OP_COMM || + code->op == FETCH_OP_DATA) { trace_probe_log_err(offs, COMM_CANT_DEREF); return -EINVAL; } @@ -444,6 +468,21 @@ parse_probe_arg(char *arg, const struct fetch_type *type, code->offset = offset; } break; + case '\\': /* Immediate value */ + if (arg[1] == '"') { /* Immediate string */ + ret = __parse_imm_string(arg + 2, &tmp, offs + 2); + if (ret) + break; + code->op = FETCH_OP_DATA; + code->data = tmp; + } else { + ret = str_to_immediate(arg + 1, &code->immediate); + if (ret) + trace_probe_log_err(offs + 1, BAD_IMM); + else + code->op = FETCH_OP_IMM; + } + break; } if (!ret && code->op == FETCH_OP_NOP) { /* Parsed, but do not find fetch method */ @@ -542,8 +581,11 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size, } } - /* Since $comm can not be dereferred, we can find $comm by strcmp */ - if (strcmp(arg, "$comm") == 0) { + /* + * Since $comm and immediate string can not be dereferred, + * we can find those by strcmp. + */ + if (strcmp(arg, "$comm") == 0 || strncmp(arg, "\\\"", 2) == 0) { /* The type of $comm must be "string", and not an array. */ if (parg->count || (t && strcmp(t, "string"))) return -EINVAL; @@ -580,7 +622,8 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size, if (!strcmp(parg->type->name, "string") || !strcmp(parg->type->name, "ustring")) { if (code->op != FETCH_OP_DEREF && code->op != FETCH_OP_UDEREF && - code->op != FETCH_OP_IMM && code->op != FETCH_OP_COMM) { + code->op != FETCH_OP_IMM && code->op != FETCH_OP_COMM && + code->op != FETCH_OP_DATA) { trace_probe_log_err(offset + (t ? (t - arg) : 0), BAD_STRING); ret = -EINVAL; @@ -589,9 +632,10 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size, if ((code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM) || parg->count) { /* - * IMM and COMM is pointing actual address, those must - * be kept, and if parg->count != 0, this is an array - * of string pointers instead of string address itself. + * IMM, DATA and COMM is pointing actual address, those + * must be kept, and if parg->count != 0, this is an + * array of string pointers instead of string address + * itself. 
*/ code++; if (code->op != FETCH_OP_NOP) { @@ -665,7 +709,8 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size, fail: if (ret) { for (code = tmp; code < tmp + FETCH_INSN_MAX; code++) - if (code->op == FETCH_NOP_SYMBOL) + if (code->op == FETCH_NOP_SYMBOL || + code->op == FETCH_OP_DATA) kfree(code->data); } kfree(tmp); @@ -736,7 +781,8 @@ void traceprobe_free_probe_arg(struct probe_arg *arg) struct fetch_insn *code = arg->code; while (code && code->op != FETCH_OP_END) { - if (code->op == FETCH_NOP_SYMBOL) + if (code->op == FETCH_NOP_SYMBOL || + code->op == FETCH_OP_DATA) kfree(code->data); code++; } @@ -886,44 +932,85 @@ int traceprobe_define_arg_fields(struct trace_event_call *event_call, return 0; } +static void trace_probe_event_free(struct trace_probe_event *tpe) +{ + kfree(tpe->class.system); + kfree(tpe->call.name); + kfree(tpe->call.print_fmt); + kfree(tpe); +} + +int trace_probe_append(struct trace_probe *tp, struct trace_probe *to) +{ + if (trace_probe_has_sibling(tp)) + return -EBUSY; + + list_del_init(&tp->list); + trace_probe_event_free(tp->event); + + tp->event = to->event; + list_add_tail(&tp->list, trace_probe_probe_list(to)); + + return 0; +} + +void trace_probe_unlink(struct trace_probe *tp) +{ + list_del_init(&tp->list); + if (list_empty(trace_probe_probe_list(tp))) + trace_probe_event_free(tp->event); + tp->event = NULL; +} void trace_probe_cleanup(struct trace_probe *tp) { - struct trace_event_call *call = trace_probe_event_call(tp); int i; for (i = 0; i < tp->nr_args; i++) traceprobe_free_probe_arg(&tp->args[i]); - if (call->class) - kfree(call->class->system); - kfree(call->name); - kfree(call->print_fmt); + if (tp->event) + trace_probe_unlink(tp); } int trace_probe_init(struct trace_probe *tp, const char *event, const char *group) { - struct trace_event_call *call = trace_probe_event_call(tp); + struct trace_event_call *call; + int ret = 0; if (!event || !group) return -EINVAL; - call->class = &tp->class; - call->name = kstrdup(event, GFP_KERNEL); - if (!call->name) + tp->event = kzalloc(sizeof(struct trace_probe_event), GFP_KERNEL); + if (!tp->event) return -ENOMEM; - tp->class.system = kstrdup(group, GFP_KERNEL); - if (!tp->class.system) { - kfree(call->name); - call->name = NULL; - return -ENOMEM; + INIT_LIST_HEAD(&tp->event->files); + INIT_LIST_HEAD(&tp->event->class.fields); + INIT_LIST_HEAD(&tp->event->probes); + INIT_LIST_HEAD(&tp->list); + list_add(&tp->event->probes, &tp->list); + + call = trace_probe_event_call(tp); + call->class = &tp->event->class; + call->name = kstrdup(event, GFP_KERNEL); + if (!call->name) { + ret = -ENOMEM; + goto error; + } + + tp->event->class.system = kstrdup(group, GFP_KERNEL); + if (!tp->event->class.system) { + ret = -ENOMEM; + goto error; } - INIT_LIST_HEAD(&tp->files); - INIT_LIST_HEAD(&tp->class.fields); return 0; + +error: + trace_probe_cleanup(tp); + return ret; } int trace_probe_register_event_call(struct trace_probe *tp) @@ -952,7 +1039,7 @@ int trace_probe_add_file(struct trace_probe *tp, struct trace_event_file *file) link->file = file; INIT_LIST_HEAD(&link->list); - list_add_tail_rcu(&link->list, &tp->files); + list_add_tail_rcu(&link->list, &tp->event->files); trace_probe_set_flag(tp, TP_FLAG_TRACE); return 0; } @@ -983,8 +1070,45 @@ int trace_probe_remove_file(struct trace_probe *tp, synchronize_rcu(); kfree(link); - if (list_empty(&tp->files)) + if (list_empty(&tp->event->files)) trace_probe_clear_flag(tp, TP_FLAG_TRACE); return 0; } + +/* + * Return the smallest index of different type argument 
(start from 1). + * If all argument types and name are same, return 0. + */ +int trace_probe_compare_arg_type(struct trace_probe *a, struct trace_probe *b) +{ + int i; + + for (i = 0; i < a->nr_args; i++) { + if ((b->nr_args <= i) || + ((a->args[i].type != b->args[i].type) || + (a->args[i].count != b->args[i].count) || + strcmp(a->args[i].name, b->args[i].name))) + return i + 1; + } + + return 0; +} + +bool trace_probe_match_command_args(struct trace_probe *tp, + int argc, const char **argv) +{ + char buf[MAX_ARGSTR_LEN + 1]; + int i; + + if (tp->nr_args < argc) + return false; + + for (i = 0; i < argc; i++) { + snprintf(buf, sizeof(buf), "%s=%s", + tp->args[i].name, tp->args[i].comm); + if (strcmp(buf, argv[i])) + return false; + } + return true; +} diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index d1714820efe194..4ee703728aeca5 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -89,6 +89,7 @@ enum fetch_op { FETCH_OP_COMM, /* Current comm */ FETCH_OP_ARG, /* Function argument : .param */ FETCH_OP_FOFFS, /* File offset: .immediate */ + FETCH_OP_DATA, /* Allocated data: .data */ // Stage 2 (dereference) op FETCH_OP_DEREF, /* Dereference: .offset */ FETCH_OP_UDEREF, /* User-space Dereference: .offset */ @@ -222,11 +223,18 @@ struct probe_arg { const struct fetch_type *type; /* Type of this argument */ }; -struct trace_probe { +/* Event call and class holder */ +struct trace_probe_event { unsigned int flags; /* For TP_FLAG_* */ struct trace_event_class class; struct trace_event_call call; struct list_head files; + struct list_head probes; +}; + +struct trace_probe { + struct list_head list; + struct trace_probe_event *event; ssize_t size; /* trace entry size */ unsigned int nr_args; struct probe_arg args[]; @@ -240,19 +248,19 @@ struct event_file_link { static inline bool trace_probe_test_flag(struct trace_probe *tp, unsigned int flag) { - return !!(tp->flags & flag); + return !!(tp->event->flags & flag); } static inline void trace_probe_set_flag(struct trace_probe *tp, unsigned int flag) { - tp->flags |= flag; + tp->event->flags |= flag; } static inline void trace_probe_clear_flag(struct trace_probe *tp, unsigned int flag) { - tp->flags &= ~flag; + tp->event->flags &= ~flag; } static inline bool trace_probe_is_enabled(struct trace_probe *tp) @@ -262,45 +270,76 @@ static inline bool trace_probe_is_enabled(struct trace_probe *tp) static inline const char *trace_probe_name(struct trace_probe *tp) { - return trace_event_name(&tp->call); + return trace_event_name(&tp->event->call); } static inline const char *trace_probe_group_name(struct trace_probe *tp) { - return tp->call.class->system; + return tp->event->call.class->system; } static inline struct trace_event_call * trace_probe_event_call(struct trace_probe *tp) { - return &tp->call; + return &tp->event->call; +} + +static inline struct trace_probe_event * +trace_probe_event_from_call(struct trace_event_call *event_call) +{ + return container_of(event_call, struct trace_probe_event, call); +} + +static inline struct trace_probe * +trace_probe_primary_from_call(struct trace_event_call *call) +{ + struct trace_probe_event *tpe = trace_probe_event_from_call(call); + + return list_first_entry(&tpe->probes, struct trace_probe, list); +} + +static inline struct list_head *trace_probe_probe_list(struct trace_probe *tp) +{ + return &tp->event->probes; +} + +static inline bool trace_probe_has_sibling(struct trace_probe *tp) +{ + struct list_head *list = trace_probe_probe_list(tp); + + return 
!list_empty(list) && !list_is_singular(list); } static inline int trace_probe_unregister_event_call(struct trace_probe *tp) { /* tp->event is unregistered in trace_remove_event_call() */ - return trace_remove_event_call(&tp->call); + return trace_remove_event_call(&tp->event->call); } static inline bool trace_probe_has_single_file(struct trace_probe *tp) { - return !!list_is_singular(&tp->files); + return !!list_is_singular(&tp->event->files); } int trace_probe_init(struct trace_probe *tp, const char *event, const char *group); void trace_probe_cleanup(struct trace_probe *tp); +int trace_probe_append(struct trace_probe *tp, struct trace_probe *to); +void trace_probe_unlink(struct trace_probe *tp); int trace_probe_register_event_call(struct trace_probe *tp); int trace_probe_add_file(struct trace_probe *tp, struct trace_event_file *file); int trace_probe_remove_file(struct trace_probe *tp, struct trace_event_file *file); struct event_file_link *trace_probe_get_file_link(struct trace_probe *tp, struct trace_event_file *file); +int trace_probe_compare_arg_type(struct trace_probe *a, struct trace_probe *b); +bool trace_probe_match_command_args(struct trace_probe *tp, + int argc, const char **argv); #define trace_probe_for_each_link(pos, tp) \ - list_for_each_entry(pos, &(tp)->files, list) + list_for_each_entry(pos, &(tp)->event->files, list) #define trace_probe_for_each_link_rcu(pos, tp) \ - list_for_each_entry_rcu(pos, &(tp)->files, list) + list_for_each_entry_rcu(pos, &(tp)->event->files, list) /* Check the name is good for event/group/fields */ static inline bool is_good_name(const char *name) @@ -370,6 +409,8 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call, C(BAD_VAR, "Invalid $-valiable specified"), \ C(BAD_REG_NAME, "Invalid register name"), \ C(BAD_MEM_ADDR, "Invalid memory address"), \ + C(BAD_IMM, "Invalid immediate value"), \ + C(IMMSTR_NO_CLOSE, "String is not closed with '\"'"), \ C(FILE_ON_KPROBE, "File offset is not available with kprobe"), \ C(BAD_FILE_OFFS, "Invalid file offset value"), \ C(SYM_ON_UPROBE, "Symbol is not available with uprobe"), \ @@ -393,7 +434,10 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call, C(ARG_TOO_LONG, "Argument expression is too long"), \ C(NO_ARG_BODY, "No argument expression"), \ C(BAD_INSN_BNDRY, "Probe point is not an instruction boundary"),\ - C(FAIL_REG_PROBE, "Failed to register probe event"), + C(FAIL_REG_PROBE, "Failed to register probe event"),\ + C(DIFF_PROBE_TYPE, "Probe type is different from existing probe"),\ + C(DIFF_ARG_TYPE, "Argument type or name is different from existing probe"),\ + C(SAME_PROBE, "There is already the exact same probe event"), #undef C #define C(a, b) TP_ERR_##a diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 5d16f73898dbd2..ec9a34a9712923 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -53,6 +53,104 @@ static void print_max_stack(void) } } +/* + * The stack tracer looks for a maximum stack at each call from a function. It + * registers a callback from ftrace, and in that callback it examines the stack + * size. It determines the stack size from the variable passed in, which is the + * address of a local variable in the stack_trace_call() callback function. + * The stack size is calculated by the address of the local variable to the top + * of the current stack. If that size is smaller than the currently saved max + * stack size, nothing more is done. 
+ * + * If the size of the stack is greater than the maximum recorded size, then the + * following algorithm takes place. + * + * For architectures (like x86) that store the function's return address before + * saving the function's local variables, the stack will look something like + * this: + * + * [ top of stack ] + * 0: sys call entry frame + * 10: return addr to entry code + * 11: start of sys_foo frame + * 20: return addr to sys_foo + * 21: start of kernel_func_bar frame + * 30: return addr to kernel_func_bar + * 31: [ do trace stack here ] + * + * The save_stack_trace() is called returning all the functions it finds in the + * current stack. Which would be (from the bottom of the stack to the top): + * + * return addr to kernel_func_bar + * return addr to sys_foo + * return addr to entry code + * + * Now to figure out how much each of these functions' local variable size is, + * a search of the stack is made to find these values. When a match is made, it + * is added to the stack_dump_trace[] array. The offset into the stack is saved + * in the stack_trace_index[] array. The above example would show: + * + * stack_dump_trace[] | stack_trace_index[] + * ------------------ + ------------------- + * return addr to kernel_func_bar | 30 + * return addr to sys_foo | 20 + * return addr to entry | 10 + * + * The print_max_stack() function above, uses these values to print the size of + * each function's portion of the stack. + * + * for (i = 0; i < nr_entries; i++) { + * size = i == nr_entries - 1 ? stack_trace_index[i] : + * stack_trace_index[i] - stack_trace_index[i+1] + * print "%d %d %d %s\n", i, stack_trace_index[i], size, stack_dump_trace[i]); + * } + * + * The above shows + * + * depth size location + * ----- ---- -------- + * 0 30 10 kernel_func_bar + * 1 20 10 sys_foo + * 2 10 10 entry code + * + * Now for architectures that might save the return address after the functions + * local variables (saving the link register before calling nested functions), + * this will cause the stack to look a little different: + * + * [ top of stack ] + * 0: sys call entry frame + * 10: start of sys_foo_frame + * 19: return addr to entry code << lr saved before calling kernel_func_bar + * 20: start of kernel_func_bar frame + * 29: return addr to sys_foo_frame << lr saved before calling next function + * 30: [ do trace stack here ] + * + * Although the functions returned by save_stack_trace() may be the same, the + * placement in the stack will be different. Using the same algorithm as above + * would yield: + * + * stack_dump_trace[] | stack_trace_index[] + * ------------------ + ------------------- + * return addr to kernel_func_bar | 30 + * return addr to sys_foo | 29 + * return addr to entry | 19 + * + * Where the mapping is off by one: + * + * kernel_func_bar stack frame size is 29 - 19 not 30 - 29! + * + * To fix this, if the architecture sets ARCH_RET_ADDR_AFTER_LOCAL_VARS the + * values in stack_trace_index[] are shifted by one to and the number of + * stack trace entries is decremented by one. + * + * stack_dump_trace[] | stack_trace_index[] + * ------------------ + ------------------- + * return addr to kernel_func_bar | 29 + * return addr to sys_foo | 19 + * + * Although the entry function is not displayed, the first function (sys_foo) + * will still include the stack size of it. 
+ */ static void check_stack(unsigned long ip, unsigned long *stack) { unsigned long this_size, flags; unsigned long *p, *top, *start; @@ -158,6 +256,20 @@ static void check_stack(unsigned long ip, unsigned long *stack) i++; } +#ifdef ARCH_FTRACE_SHIFT_STACK_TRACER + /* + * Some archs will store the link register before calling + * nested functions. This means the saved return address + * comes after the local storage, and we need to shift + * for that. + */ + if (x > 1) { + memmove(&stack_trace_index[0], &stack_trace_index[1], + sizeof(stack_trace_index[0]) * (x - 1)); + x--; + } +#endif + stack_trace_nr_entries = x; if (task_stack_end_corrupted(current)) { diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 1ceedb9146b114..34dd6d0016a3b7 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -44,7 +44,7 @@ static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev); static int trace_uprobe_release(struct dyn_event *ev); static bool trace_uprobe_is_busy(struct dyn_event *ev); static bool trace_uprobe_match(const char *system, const char *event, - struct dyn_event *ev); + int argc, const char **argv, struct dyn_event *ev); static struct dyn_event_operations trace_uprobe_ops = { .create = trace_uprobe_create, @@ -248,6 +248,9 @@ process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest, case FETCH_OP_COMM: val = FETCH_TOKEN_COMM; break; + case FETCH_OP_DATA: + val = (unsigned long)code->data; + break; case FETCH_OP_FOFFS: val = translate_user_vaddr(code->immediate); break; @@ -284,13 +287,54 @@ static bool trace_uprobe_is_busy(struct dyn_event *ev) return trace_probe_is_enabled(&tu->tp); } +static bool trace_uprobe_match_command_head(struct trace_uprobe *tu, + int argc, const char **argv) +{ + char buf[MAX_ARGSTR_LEN + 1]; + int len; + + if (!argc) + return true; + + len = strlen(tu->filename); + if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':') + return false; + + if (tu->ref_ctr_offset == 0) + snprintf(buf, sizeof(buf), "0x%0*lx", + (int)(sizeof(void *) * 2), tu->offset); + else + snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)", + (int)(sizeof(void *) * 2), tu->offset, + tu->ref_ctr_offset); + if (strcmp(buf, &argv[0][len + 1])) + return false; + + argc--; argv++; + + return trace_probe_match_command_args(&tu->tp, argc, argv); +} + static bool trace_uprobe_match(const char *system, const char *event, - struct dyn_event *ev) + int argc, const char **argv, struct dyn_event *ev) { struct trace_uprobe *tu = to_trace_uprobe(ev); return strcmp(trace_probe_name(&tu->tp), event) == 0 && - (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0); + (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) && + trace_uprobe_match_command_head(tu, argc, argv); +} + +static nokprobe_inline struct trace_uprobe * +trace_uprobe_primary_from_call(struct trace_event_call *call) +{ + struct trace_probe *tp; + + tp = trace_probe_primary_from_call(call); + if (WARN_ON_ONCE(!tp)) + return NULL; + + return container_of(tp, struct trace_uprobe, tp); } /* @@ -352,15 +396,75 @@ static int unregister_trace_uprobe(struct trace_uprobe *tu) { int ret; + if (trace_probe_has_sibling(&tu->tp)) + goto unreg; + ret = unregister_uprobe_event(tu); if (ret) return ret; +unreg: dyn_event_remove(&tu->devent); + trace_probe_unlink(&tu->tp); free_trace_uprobe(tu); return 0; } +static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig, + struct trace_uprobe *comp) +{ + struct trace_probe_event *tpe = orig->tp.event; + 
struct trace_probe *pos; + struct inode *comp_inode = d_real_inode(comp->path.dentry); + int i; + + list_for_each_entry(pos, &tpe->probes, list) { + orig = container_of(pos, struct trace_uprobe, tp); + if (comp_inode != d_real_inode(orig->path.dentry) || + comp->offset != orig->offset) + continue; + + /* + * trace_probe_compare_arg_type() ensured that nr_args and + * each argument name and type are same. Let's compare comm. + */ + for (i = 0; i < orig->tp.nr_args; i++) { + if (strcmp(orig->tp.args[i].comm, + comp->tp.args[i].comm)) + continue; + } + + return true; + } + + return false; +} + +static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to) +{ + int ret; + + ret = trace_probe_compare_arg_type(&tu->tp, &to->tp); + if (ret) { + /* Note that argument starts index = 2 */ + trace_probe_log_set_index(ret + 1); + trace_probe_log_err(0, DIFF_ARG_TYPE); + return -EEXIST; + } + if (trace_uprobe_has_same_uprobe(to, tu)) { + trace_probe_log_set_index(0); + trace_probe_log_err(0, SAME_PROBE); + return -EEXIST; + } + + /* Append to existing event */ + ret = trace_probe_append(&tu->tp, &to->tp); + if (!ret) + dyn_event_add(&tu->devent); + + return ret; +} + /* * Uprobe with multiple reference counter is not allowed. i.e. * If inode and offset matches, reference counter offset *must* @@ -370,25 +474,21 @@ static int unregister_trace_uprobe(struct trace_uprobe *tu) * as the new one does not conflict with any other existing * ones. */ -static struct trace_uprobe *find_old_trace_uprobe(struct trace_uprobe *new) +static int validate_ref_ctr_offset(struct trace_uprobe *new) { struct dyn_event *pos; - struct trace_uprobe *tmp, *old = NULL; + struct trace_uprobe *tmp; struct inode *new_inode = d_real_inode(new->path.dentry); - old = find_probe_event(trace_probe_name(&new->tp), - trace_probe_group_name(&new->tp)); - for_each_trace_uprobe(tmp, pos) { - if ((old ? 
old != tmp : true) && - new_inode == d_real_inode(tmp->path.dentry) && + if (new_inode == d_real_inode(tmp->path.dentry) && new->offset == tmp->offset && new->ref_ctr_offset != tmp->ref_ctr_offset) { pr_warn("Reference counter offset mismatch."); - return ERR_PTR(-EINVAL); + return -EINVAL; } } - return old; + return 0; } /* Register a trace_uprobe and probe_event */ @@ -399,18 +499,22 @@ static int register_trace_uprobe(struct trace_uprobe *tu) mutex_lock(&event_mutex); - /* register as an event */ - old_tu = find_old_trace_uprobe(tu); - if (IS_ERR(old_tu)) { - ret = PTR_ERR(old_tu); + ret = validate_ref_ctr_offset(tu); + if (ret) goto end; - } + /* register as an event */ + old_tu = find_probe_event(trace_probe_name(&tu->tp), + trace_probe_group_name(&tu->tp)); if (old_tu) { - /* delete old event */ - ret = unregister_trace_uprobe(old_tu); - if (ret) - goto end; + if (is_ret_probe(tu) != is_ret_probe(old_tu)) { + trace_probe_log_set_index(0); + trace_probe_log_err(0, DIFF_PROBE_TYPE); + ret = -EEXIST; + } else { + ret = append_trace_uprobe(tu, old_tu); + } + goto end; } ret = register_uprobe_event(tu); @@ -897,7 +1001,10 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e u8 *data; entry = (struct uprobe_trace_entry_head *)iter->ent; - tu = container_of(event, struct trace_uprobe, tp.call.event); + tu = trace_uprobe_primary_from_call( + container_of(event, struct trace_event_call, event)); + if (unlikely(!tu)) + goto out; if (is_ret_probe(tu)) { trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", @@ -924,27 +1031,71 @@ typedef bool (*filter_func_t)(struct uprobe_consumer *self, enum uprobe_filter_ctx ctx, struct mm_struct *mm); -static int -probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file, - filter_func_t filter) +static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter) { - bool enabled = trace_probe_is_enabled(&tu->tp); int ret; + tu->consumer.filter = filter; + tu->inode = d_real_inode(tu->path.dentry); + + if (tu->ref_ctr_offset) + ret = uprobe_register_refctr(tu->inode, tu->offset, + tu->ref_ctr_offset, &tu->consumer); + else + ret = uprobe_register(tu->inode, tu->offset, &tu->consumer); + + if (ret) + tu->inode = NULL; + + return ret; +} + +static void __probe_event_disable(struct trace_probe *tp) +{ + struct trace_probe *pos; + struct trace_uprobe *tu; + + list_for_each_entry(pos, trace_probe_probe_list(tp), list) { + tu = container_of(pos, struct trace_uprobe, tp); + if (!tu->inode) + continue; + + WARN_ON(!uprobe_filter_is_empty(&tu->filter)); + + uprobe_unregister(tu->inode, tu->offset, &tu->consumer); + tu->inode = NULL; + } +} + +static int probe_event_enable(struct trace_event_call *call, + struct trace_event_file *file, filter_func_t filter) +{ + struct trace_probe *pos, *tp; + struct trace_uprobe *tu; + bool enabled; + int ret; + + tp = trace_probe_primary_from_call(call); + if (WARN_ON_ONCE(!tp)) + return -ENODEV; + enabled = trace_probe_is_enabled(tp); + + /* This may also change "enabled" state */ if (file) { - if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE)) + if (trace_probe_test_flag(tp, TP_FLAG_PROFILE)) return -EINTR; - ret = trace_probe_add_file(&tu->tp, file); + ret = trace_probe_add_file(tp, file); if (ret < 0) return ret; } else { - if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE)) + if (trace_probe_test_flag(tp, TP_FLAG_TRACE)) return -EINTR; - trace_probe_set_flag(&tu->tp, TP_FLAG_PROFILE); + trace_probe_set_flag(tp, TP_FLAG_PROFILE); } + tu = container_of(tp, struct trace_uprobe, tp); 
WARN_ON(!uprobe_filter_is_empty(&tu->filter)); if (enabled) @@ -954,18 +1105,15 @@ probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file, if (ret) goto err_flags; - tu->consumer.filter = filter; - tu->inode = d_real_inode(tu->path.dentry); - if (tu->ref_ctr_offset) { - ret = uprobe_register_refctr(tu->inode, tu->offset, - tu->ref_ctr_offset, &tu->consumer); - } else { - ret = uprobe_register(tu->inode, tu->offset, &tu->consumer); + list_for_each_entry(pos, trace_probe_probe_list(tp), list) { + tu = container_of(pos, struct trace_uprobe, tp); + ret = trace_uprobe_enable(tu, filter); + if (ret) { + __probe_event_disable(tp); + goto err_buffer; + } } - if (ret) - goto err_buffer; - return 0; err_buffer: @@ -973,33 +1121,35 @@ probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file, err_flags: if (file) - trace_probe_remove_file(&tu->tp, file); + trace_probe_remove_file(tp, file); else - trace_probe_clear_flag(&tu->tp, TP_FLAG_PROFILE); + trace_probe_clear_flag(tp, TP_FLAG_PROFILE); return ret; } -static void -probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file) +static void probe_event_disable(struct trace_event_call *call, + struct trace_event_file *file) { - if (!trace_probe_is_enabled(&tu->tp)) + struct trace_probe *tp; + + tp = trace_probe_primary_from_call(call); + if (WARN_ON_ONCE(!tp)) + return; + + if (!trace_probe_is_enabled(tp)) return; if (file) { - if (trace_probe_remove_file(&tu->tp, file) < 0) + if (trace_probe_remove_file(tp, file) < 0) return; - if (trace_probe_is_enabled(&tu->tp)) + if (trace_probe_is_enabled(tp)) return; } else - trace_probe_clear_flag(&tu->tp, TP_FLAG_PROFILE); - - WARN_ON(!uprobe_filter_is_empty(&tu->filter)); - - uprobe_unregister(tu->inode, tu->offset, &tu->consumer); - tu->inode = NULL; + trace_probe_clear_flag(tp, TP_FLAG_PROFILE); + __probe_event_disable(tp); uprobe_buffer_disable(); } @@ -1007,7 +1157,11 @@ static int uprobe_event_define_fields(struct trace_event_call *event_call) { int ret, size; struct uprobe_trace_entry_head field; - struct trace_uprobe *tu = event_call->data; + struct trace_uprobe *tu; + + tu = trace_uprobe_primary_from_call(event_call); + if (unlikely(!tu)) + return -ENODEV; if (is_ret_probe(tu)) { DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0); @@ -1100,6 +1254,27 @@ static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event) return err; } +static int uprobe_perf_multi_call(struct trace_event_call *call, + struct perf_event *event, + int (*op)(struct trace_uprobe *tu, struct perf_event *event)) +{ + struct trace_probe *pos, *tp; + struct trace_uprobe *tu; + int ret = 0; + + tp = trace_probe_primary_from_call(call); + if (WARN_ON_ONCE(!tp)) + return -ENODEV; + + list_for_each_entry(pos, trace_probe_probe_list(tp), list) { + tu = container_of(pos, struct trace_uprobe, tp); + ret = op(tu, event); + if (ret) + break; + } + + return ret; +} static bool uprobe_perf_filter(struct uprobe_consumer *uc, enum uprobe_filter_ctx ctx, struct mm_struct *mm) { @@ -1213,30 +1388,29 @@ static int trace_uprobe_register(struct trace_event_call *event, enum trace_reg type, void *data) { - struct trace_uprobe *tu = event->data; struct trace_event_file *file = data; switch (type) { case TRACE_REG_REGISTER: - return probe_event_enable(tu, file, NULL); + return probe_event_enable(event, file, NULL); case TRACE_REG_UNREGISTER: - probe_event_disable(tu, file); + probe_event_disable(event, file); return 0; #ifdef CONFIG_PERF_EVENTS case TRACE_REG_PERF_REGISTER: - 
return probe_event_enable(tu, NULL, uprobe_perf_filter); + return probe_event_enable(event, NULL, uprobe_perf_filter); case TRACE_REG_PERF_UNREGISTER: - probe_event_disable(tu, NULL); + probe_event_disable(event, NULL); return 0; case TRACE_REG_PERF_OPEN: - return uprobe_perf_open(tu, data); + return uprobe_perf_multi_call(event, data, uprobe_perf_open); case TRACE_REG_PERF_CLOSE: - return uprobe_perf_close(tu, data); + return uprobe_perf_multi_call(event, data, uprobe_perf_close); #endif default: @@ -1330,7 +1504,6 @@ static inline void init_trace_event_call(struct trace_uprobe *tu) call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY; call->class->reg = trace_uprobe_register; - call->data = tu; } static int register_uprobe_event(struct trace_uprobe *tu) @@ -1399,7 +1572,7 @@ void destroy_local_trace_uprobe(struct trace_event_call *event_call) { struct trace_uprobe *tu; - tu = container_of(event_call, struct trace_uprobe, tp.call); + tu = trace_uprobe_primary_from_call(event_call); free_trace_uprobe(tu); } diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c index 176f03b83e560a..1b61d874e33747 100644 --- a/lib/lz4/lz4hc_compress.c +++ b/lib/lz4/lz4hc_compress.c @@ -663,7 +663,6 @@ static void LZ4HC_setExternalDict( /* match referencing will resume from there */ ctxPtr->nextToUpdate = ctxPtr->dictLimit; } -EXPORT_SYMBOL(LZ4HC_setExternalDict); static int LZ4_compressHC_continue_generic( LZ4_streamHC_t *LZ4_streamHCPtr, diff --git a/samples/vfio-mdev/mtty.c b/samples/vfio-mdev/mtty.c index 92e770a06ea21b..ce84a300a4dafd 100644 --- a/samples/vfio-mdev/mtty.c +++ b/samples/vfio-mdev/mtty.c @@ -152,20 +152,9 @@ static const struct file_operations vd_fops = { /* function prototypes */ -static int mtty_trigger_interrupt(const guid_t *uuid); +static int mtty_trigger_interrupt(struct mdev_state *mdev_state); /* Helper functions */ -static struct mdev_state *find_mdev_state_by_uuid(const guid_t *uuid) -{ - struct mdev_state *mds; - - list_for_each_entry(mds, &mdev_devices_list, next) { - if (guid_equal(mdev_uuid(mds->mdev), uuid)) - return mds; - } - - return NULL; -} static void dump_buffer(u8 *buf, uint32_t count) { @@ -337,8 +326,7 @@ static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state, pr_err("Serial port %d: Fifo level trigger\n", index); #endif - mtty_trigger_interrupt( - mdev_uuid(mdev_state->mdev)); + mtty_trigger_interrupt(mdev_state); } } else { #if defined(DEBUG_INTR) @@ -352,8 +340,7 @@ static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state, */ if (mdev_state->s[index].uart_reg[UART_IER] & UART_IER_RLSI) - mtty_trigger_interrupt( - mdev_uuid(mdev_state->mdev)); + mtty_trigger_interrupt(mdev_state); } mutex_unlock(&mdev_state->rxtx_lock); break; @@ -372,8 +359,7 @@ static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state, pr_err("Serial port %d: IER_THRI write\n", index); #endif - mtty_trigger_interrupt( - mdev_uuid(mdev_state->mdev)); + mtty_trigger_interrupt(mdev_state); } mutex_unlock(&mdev_state->rxtx_lock); @@ -444,7 +430,7 @@ static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state, #if defined(DEBUG_INTR) pr_err("Serial port %d: MCR_OUT2 write\n", index); #endif - mtty_trigger_interrupt(mdev_uuid(mdev_state->mdev)); + mtty_trigger_interrupt(mdev_state); } if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) && @@ -452,7 +438,7 @@ static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state, #if defined(DEBUG_INTR) pr_err("Serial port %d: MCR 
RTS/DTR write\n", index); #endif - mtty_trigger_interrupt(mdev_uuid(mdev_state->mdev)); + mtty_trigger_interrupt(mdev_state); } break; @@ -503,8 +489,7 @@ static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state, #endif if (mdev_state->s[index].uart_reg[UART_IER] & UART_IER_THRI) - mtty_trigger_interrupt( - mdev_uuid(mdev_state->mdev)); + mtty_trigger_interrupt(mdev_state); } mutex_unlock(&mdev_state->rxtx_lock); @@ -1028,17 +1013,9 @@ static int mtty_set_irqs(struct mdev_device *mdev, uint32_t flags, return ret; } -static int mtty_trigger_interrupt(const guid_t *uuid) +static int mtty_trigger_interrupt(struct mdev_state *mdev_state) { int ret = -1; - struct mdev_state *mdev_state; - - mdev_state = find_mdev_state_by_uuid(uuid); - - if (!mdev_state) { - pr_info("%s: mdev not found\n", __func__); - return -EINVAL; - } if ((mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX) && (!mdev_state->msi_evtfd)) diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include index 4bbf4fc163a297..d4adfbe426903e 100644 --- a/scripts/Kconfig.include +++ b/scripts/Kconfig.include @@ -35,5 +35,8 @@ ld-option = $(success,$(LD) -v $(1)) $(error-if,$(failure,command -v $(CC)),compiler '$(CC)' not found) $(error-if,$(failure,command -v $(LD)),linker '$(LD)' not found) +# Fail if the linker is gold as it's not capable of linking the kernel proper +$(error-if,$(success, $(LD) -v | grep -q gold), gold linker '$(LD)' not supported) + # gcc version including patch level gcc-version := $(shell,$(srctree)/scripts/gcc-version.sh $(CC)) diff --git a/scripts/Makefile b/scripts/Makefile index 16bcb80878997c..c42891e10ba30d 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -36,4 +36,4 @@ subdir-$(CONFIG_MODVERSIONS) += genksyms subdir-$(CONFIG_SECURITY_SELINUX) += selinux # Let clean descend into subdirs -subdir- += basic dtc gdb kconfig mod package +subdir- += basic dtc gdb kconfig mod diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 2f66ed388d1c33..f72aba64d611d2 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -52,7 +52,7 @@ ifndef obj $(warning kbuild: Makefile.build is included improperly) endif -ifeq ($(MAKECMDGOALS)$(need-modorder),) +ifeq ($(need-modorder),) ifneq ($(obj-m),) $(warning $(patsubst %.o,'%.ko',$(obj-m)) will not be built even though obj-m is specified.) $(warning You cannot use subdir-y/m to visit a module Makefile. Use obj-y/m instead.) 
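The warnings tightened just above fire when a parent Makefile reaches a module directory through subdir-y/m, which only descends into the directory and never links the module objects. A minimal sketch of the wrong and the corrected spelling, with a made-up directory name:

# hypothetical parent Makefile
# wrong: descends into foo/, but modules under foo/ are never linked
subdir-y += foo

# right: visit the directory through obj-y/obj-m so Kbuild collects
# its built-in objects and modules
obj-m += foo/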
@@ -76,11 +76,6 @@ endif mod-targets := $(patsubst %.o, %.mod, $(obj-m)) -__build: $(if $(KBUILD_BUILTIN),$(builtin-target) $(lib-target) $(extra-y)) \ - $(if $(KBUILD_MODULES),$(obj-m) $(mod-targets) $(modorder-target)) \ - $(subdir-ym) $(always) - @: - # Linus' kernel sanity checking tool ifeq ($(KBUILD_CHECKSRC),1) quiet_cmd_checksrc = CHECK $< @@ -90,23 +85,13 @@ else ifeq ($(KBUILD_CHECKSRC),2) cmd_force_checksrc = $(CHECK) $(CHECKFLAGS) $(c_flags) $< endif -ifneq ($(KBUILD_ENABLE_EXTRA_GCC_CHECKS),) +ifneq ($(KBUILD_EXTRA_WARN),) cmd_checkdoc = $(srctree)/scripts/kernel-doc -none $< endif # Compile C sources (.c) # --------------------------------------------------------------------------- -# Default is built-in, unless we know otherwise -$(foreach x, i ll lst o s symtypes, $(patsubst %.o,%.$(x),$(real-obj-m))): \ - part-of-module := y - -modkern_cflags = \ - $(if $(part-of-module), \ - $(KBUILD_CFLAGS_MODULE) $(CFLAGS_MODULE), \ - $(KBUILD_CFLAGS_KERNEL) $(CFLAGS_KERNEL)) -quiet_modtag = $(if $(part-of-module),[M], ) - quiet_cmd_cc_s_c = CC $(quiet_modtag) $@ cmd_cc_s_c = $(CC) $(filter-out $(DEBUG_CFLAGS), $(c_flags)) $(DISABLE_LTO) -fverbose-asm -S -o $@ $< @@ -310,11 +295,6 @@ $(obj)/%.h.s: $(src)/%.h FORCE # Compile assembler sources (.S) # --------------------------------------------------------------------------- -modkern_aflags := $(KBUILD_AFLAGS_KERNEL) $(AFLAGS_KERNEL) - -$(real-obj-m) : modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE) -$(real-obj-m:.o=.s): modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE) - # .S file exports must have their C prototypes defined in asm/asm-prototypes.h # or a file that it includes, in order to get versioned symbols. We build a # dummy C file that includes asm-prototypes and the EXPORT_SYMBOL lines from @@ -356,11 +336,7 @@ $(obj)/%.s: $(src)/%.S FORCE quiet_cmd_as_o_S = AS $(quiet_modtag) $@ cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $< -ifdef CONFIG_MODVERSIONS - -ASM_PROTOTYPES := $(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/asm-prototypes.h) - -ifneq ($(ASM_PROTOTYPES),) +ifdef CONFIG_ASM_MODVERSIONS # versioning matches the C process described above, with difference that # we parse asm-prototypes.h C header to get function definitions. @@ -376,7 +352,6 @@ cmd_modversions_S = \ rm -f $(@D)/.tmp_$(@F:.o=.ver); \ fi endif -endif $(obj)/%.o: $(src)/%.S $(objtool_dep) FORCE $(call if_changed_rule,as_o_S) @@ -395,9 +370,9 @@ $(obj)/%.lds: $(src)/%.lds.S FORCE # ASN.1 grammar # --------------------------------------------------------------------------- -quiet_cmd_asn1_compiler = ASN.1 $@ +quiet_cmd_asn1_compiler = ASN.1 $(basename $@).[ch] cmd_asn1_compiler = $(objtree)/scripts/asn1_compiler $< \ - $(subst .h,.c,$@) $(subst .c,.h,$@) + $(basename $@).c $(basename $@).h $(obj)/%.asn1.c $(obj)/%.asn1.h: $(src)/%.asn1 $(objtree)/scripts/asn1_compiler $(call cmd,asn1_compiler) @@ -489,12 +464,50 @@ targets += $(call intermediate_targets, .asn1.o, .asn1.c .asn1.h) \ $(call intermediate_targets, .lex.o, .lex.c) \ $(call intermediate_targets, .tab.o, .tab.c .tab.h) +# Build +# --------------------------------------------------------------------------- + +ifdef single-build + +curdir-single := $(sort $(foreach x, $(KBUILD_SINGLE_TARGETS), \ + $(if $(filter $(x) $(basename $(x)).o, $(targets)), $(x)))) + +# Handle single targets without any rule: show "Nothing to be done for ..." or +# "No rule to make target ..." depending on whether the target exists. 
+unknown-single := $(filter-out $(addsuffix /%, $(subdir-ym)), \ + $(filter $(obj)/%, \ + $(filter-out $(curdir-single), \ + $(KBUILD_SINGLE_TARGETS)))) + +__build: $(curdir-single) $(subdir-ym) +ifneq ($(unknown-single),) + $(Q)$(MAKE) -f /dev/null $(unknown-single) +endif + @: + +ifeq ($(curdir-single),) +# Nothing to do in this directory. Do not include any .*.cmd file for speed-up +targets := +else +targets += $(curdir-single) +endif + +else + +__build: $(if $(KBUILD_BUILTIN),$(builtin-target) $(lib-target) $(extra-y)) \ + $(if $(KBUILD_MODULES),$(obj-m) $(mod-targets) $(modorder-target)) \ + $(subdir-ym) $(always) + @: + +endif + # Descending # --------------------------------------------------------------------------- PHONY += $(subdir-ym) $(subdir-ym): $(Q)$(MAKE) $(build)=$@ \ + $(if $(filter $@/, $(KBUILD_SINGLE_TARGETS)),single-build=) \ need-builtin=$(if $(filter $@/built-in.a, $(subdir-obj-y)),1) \ need-modorder=$(if $(need-modorder),$(if $(filter $@/modules.order, $(modorder)),1)) diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean index 0b80e3207b20d8..e367eb95c5c080 100644 --- a/scripts/Makefile.clean +++ b/scripts/Makefile.clean @@ -17,17 +17,8 @@ include $(if $(wildcard $(kbuild-dir)/Kbuild), $(kbuild-dir)/Kbuild, $(kbuild-di # Figure out what we need to build from the various variables # ========================================================================== -__subdir-y := $(patsubst %/,%,$(filter %/, $(obj-y))) -subdir-y += $(__subdir-y) -__subdir-m := $(patsubst %/,%,$(filter %/, $(obj-m))) -subdir-m += $(__subdir-m) -__subdir- := $(patsubst %/,%,$(filter %/, $(obj-))) -subdir- += $(__subdir-) - -# Subdirectories we need to descend into - -subdir-ym := $(sort $(subdir-y) $(subdir-m)) -subdir-ymn := $(sort $(subdir-ym) $(subdir-)) +subdir-ymn := $(sort $(subdir-y) $(subdir-m) $(subdir-) \ + $(patsubst %/,%, $(filter %/, $(obj-y) $(obj-m) $(obj-)))) # Add subdir path @@ -52,26 +43,14 @@ __clean-files := $(wildcard \ $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(__clean-files))) \ $(filter $(objtree)/%, $(__clean-files))) -# same as clean-files - -__clean-dirs := $(wildcard \ - $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(clean-dirs))) \ - $(filter $(objtree)/%, $(clean-dirs))) - # ========================================================================== -quiet_cmd_clean = CLEAN $(obj) - cmd_clean = rm -f $(__clean-files) -quiet_cmd_cleandir = CLEAN $(__clean-dirs) - cmd_cleandir = rm -rf $(__clean-dirs) - +quiet_cmd_clean = CLEAN $(obj) + cmd_clean = rm -rf $(__clean-files) __clean: $(subdir-ymn) ifneq ($(strip $(__clean-files)),) - +$(call cmd,clean) -endif -ifneq ($(strip $(__clean-dirs)),) - +$(call cmd,cleandir) + $(call cmd,clean) endif @: diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn index a74ce2e3c33eb4..ecddf83ac14291 100644 --- a/scripts/Makefile.extrawarn +++ b/scripts/Makefile.extrawarn @@ -1,74 +1,91 @@ # SPDX-License-Identifier: GPL-2.0 # ========================================================================== -# # make W=... settings # -# W=1 - warnings that may be relevant and does not occur too often -# W=2 - warnings that occur quite often but may still be relevant -# W=3 - the more obscure warnings, can most likely be ignored -# -# $(call cc-option, -W...) handles gcc -W.. options which -# are not supported by all versions of the compiler +# There are three warning groups enabled by W=1, W=2, W=3. +# They are independent, and can be combined like W=12 or W=123. 
# ========================================================================== KBUILD_CFLAGS += $(call cc-disable-warning, packed-not-aligned) +# backward compatibility +KBUILD_EXTRA_WARN ?= $(KBUILD_ENABLE_EXTRA_GCC_CHECKS) + ifeq ("$(origin W)", "command line") - export KBUILD_ENABLE_EXTRA_GCC_CHECKS := $(W) + KBUILD_EXTRA_WARN := $(W) endif -ifdef KBUILD_ENABLE_EXTRA_GCC_CHECKS -warning- := $(empty) - -warning-1 := -Wextra -Wunused -Wno-unused-parameter -warning-1 += -Wmissing-declarations -warning-1 += -Wmissing-format-attribute -warning-1 += -Wmissing-prototypes -warning-1 += -Wold-style-definition -warning-1 += -Wmissing-include-dirs -warning-1 += $(call cc-option, -Wunused-but-set-variable) -warning-1 += $(call cc-option, -Wunused-const-variable) -warning-1 += $(call cc-option, -Wpacked-not-aligned) -warning-1 += $(call cc-option, -Wstringop-truncation) +export KBUILD_EXTRA_WARN + +# +# W=1 - warnings which may be relevant and do not occur too often +# +ifneq ($(findstring 1, $(KBUILD_EXTRA_WARN)),) + +KBUILD_CFLAGS += -Wextra -Wunused -Wno-unused-parameter +KBUILD_CFLAGS += -Wmissing-declarations +KBUILD_CFLAGS += -Wmissing-format-attribute +KBUILD_CFLAGS += -Wmissing-prototypes +KBUILD_CFLAGS += -Wold-style-definition +KBUILD_CFLAGS += -Wmissing-include-dirs +KBUILD_CFLAGS += $(call cc-option, -Wunused-but-set-variable) +KBUILD_CFLAGS += $(call cc-option, -Wunused-const-variable) +KBUILD_CFLAGS += $(call cc-option, -Wpacked-not-aligned) +KBUILD_CFLAGS += $(call cc-option, -Wstringop-truncation) # The following turn off the warnings enabled by -Wextra -warning-1 += -Wno-missing-field-initializers -warning-1 += -Wno-sign-compare - -warning-2 += -Wcast-align -warning-2 += -Wdisabled-optimization -warning-2 += -Wnested-externs -warning-2 += -Wshadow -warning-2 += $(call cc-option, -Wlogical-op) -warning-2 += -Wmissing-field-initializers -warning-2 += -Wsign-compare -warning-2 += $(call cc-option, -Wmaybe-uninitialized) -warning-2 += $(call cc-option, -Wunused-macros) - -warning-3 := -Wbad-function-cast -warning-3 += -Wcast-qual -warning-3 += -Wconversion -warning-3 += -Wpacked -warning-3 += -Wpadded -warning-3 += -Wpointer-arith -warning-3 += -Wredundant-decls -warning-3 += -Wswitch-default -warning-3 += $(call cc-option, -Wpacked-bitfield-compat) - -warning := $(warning-$(findstring 1, $(KBUILD_ENABLE_EXTRA_GCC_CHECKS))) -warning += $(warning-$(findstring 2, $(KBUILD_ENABLE_EXTRA_GCC_CHECKS))) -warning += $(warning-$(findstring 3, $(KBUILD_ENABLE_EXTRA_GCC_CHECKS))) - -ifeq ("$(strip $(warning))","") - $(error W=$(KBUILD_ENABLE_EXTRA_GCC_CHECKS) is unknown) -endif +KBUILD_CFLAGS += -Wno-missing-field-initializers +KBUILD_CFLAGS += -Wno-sign-compare + +KBUILD_CPPFLAGS += -DKBUILD_EXTRA_WARN1 -KBUILD_CFLAGS += $(warning) else +# Some diagnostics enabled by default are noisy. +# Suppress them by using -Wno... except for W=1. 
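Besides the rename to KBUILD_EXTRA_WARN, the refactor exports each selected warning group to the preprocessor as KBUILD_EXTRA_WARN1/2/3 (the W=2 and W=3 blocks follow below), so C code can tell at build time which groups were requested; for example, make W=12 defines the first two. A hypothetical use:

/* hypothetical fragment: emit a note only when the W=3 group is on */
#ifdef KBUILD_EXTRA_WARN3
#pragma message("extra W=3 diagnostics enabled for this object")
#endif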
+ ifdef CONFIG_CC_IS_CLANG KBUILD_CFLAGS += -Wno-initializer-overrides KBUILD_CFLAGS += -Wno-format KBUILD_CFLAGS += -Wno-sign-compare KBUILD_CFLAGS += -Wno-format-zero-length endif + +endif + +# +# W=2 - warnings which occur quite often but may still be relevant +# +ifneq ($(findstring 2, $(KBUILD_EXTRA_WARN)),) + +KBUILD_CFLAGS += -Wcast-align +KBUILD_CFLAGS += -Wdisabled-optimization +KBUILD_CFLAGS += -Wnested-externs +KBUILD_CFLAGS += -Wshadow +KBUILD_CFLAGS += $(call cc-option, -Wlogical-op) +KBUILD_CFLAGS += -Wmissing-field-initializers +KBUILD_CFLAGS += -Wsign-compare +KBUILD_CFLAGS += $(call cc-option, -Wmaybe-uninitialized) +KBUILD_CFLAGS += $(call cc-option, -Wunused-macros) + +KBUILD_CPPFLAGS += -DKBUILD_EXTRA_WARN2 + +endif + +# +# W=3 - more obscure warnings, can most likely be ignored +# +ifneq ($(findstring 3, $(KBUILD_EXTRA_WARN)),) + +KBUILD_CFLAGS += -Wbad-function-cast +KBUILD_CFLAGS += -Wcast-qual +KBUILD_CFLAGS += -Wconversion +KBUILD_CFLAGS += -Wpacked +KBUILD_CFLAGS += -Wpadded +KBUILD_CFLAGS += -Wpointer-arith +KBUILD_CFLAGS += -Wredundant-decls +KBUILD_CFLAGS += -Wswitch-default +KBUILD_CFLAGS += $(call cc-option, -Wpacked-bitfield-compat) + +KBUILD_CPPFLAGS += -DKBUILD_EXTRA_WARN3 + endif diff --git a/scripts/Makefile.host b/scripts/Makefile.host index 2208ebbd8c4c02..4c51c95d40f478 100644 --- a/scripts/Makefile.host +++ b/scripts/Makefile.host @@ -1,4 +1,21 @@ # SPDX-License-Identifier: GPL-2.0 + +# LEX +# --------------------------------------------------------------------------- +quiet_cmd_flex = LEX $@ + cmd_flex = $(LEX) -o$@ -L $< + +$(obj)/%.lex.c: $(src)/%.l FORCE + $(call if_changed,flex) + +# YACC +# --------------------------------------------------------------------------- +quiet_cmd_bison = YACC $(basename $@).[ch] + cmd_bison = $(YACC) -o $(basename $@).c --defines=$(basename $@).h -t -l $< + +$(obj)/%.tab.c $(obj)/%.tab.h: $(src)/%.y FORCE + $(call if_changed,bison) + # ========================================================================== # Building binaries on the host system # Binaries are used during the compilation of the kernel, for example @@ -63,9 +80,9 @@ host-cxxshobjs := $(addprefix $(obj)/,$(host-cxxshobjs)) # Handle options to gcc. 
Support building with separate output directory _hostc_flags = $(KBUILD_HOSTCFLAGS) $(HOST_EXTRACFLAGS) \ - $(HOSTCFLAGS_$(basetarget).o) + $(HOSTCFLAGS_$(target-stem).o) _hostcxx_flags = $(KBUILD_HOSTCXXFLAGS) $(HOST_EXTRACXXFLAGS) \ - $(HOSTCXXFLAGS_$(basetarget).o) + $(HOSTCXXFLAGS_$(target-stem).o) # $(objtree)/$(obj) for including generated headers from checkin source files ifeq ($(KBUILD_EXTMOD),) @@ -85,7 +102,7 @@ hostcxx_flags = -Wp,-MD,$(depfile) $(_hostcxx_flags) # host-csingle -> Executable quiet_cmd_host-csingle = HOSTCC $@ cmd_host-csingle = $(HOSTCC) $(hostc_flags) $(KBUILD_HOSTLDFLAGS) -o $@ $< \ - $(KBUILD_HOSTLDLIBS) $(HOSTLDLIBS_$(@F)) + $(KBUILD_HOSTLDLIBS) $(HOSTLDLIBS_$(target-stem)) $(host-csingle): $(obj)/%: $(src)/%.c FORCE $(call if_changed_dep,host-csingle) @@ -93,8 +110,8 @@ $(host-csingle): $(obj)/%: $(src)/%.c FORCE # host-cmulti -> executable quiet_cmd_host-cmulti = HOSTLD $@ cmd_host-cmulti = $(HOSTCC) $(KBUILD_HOSTLDFLAGS) -o $@ \ - $(addprefix $(obj)/,$($(@F)-objs)) \ - $(KBUILD_HOSTLDLIBS) $(HOSTLDLIBS_$(@F)) + $(addprefix $(obj)/, $($(target-stem)-objs)) \ + $(KBUILD_HOSTLDLIBS) $(HOSTLDLIBS_$(target-stem)) $(host-cmulti): FORCE $(call if_changed,host-cmulti) $(call multi_depend, $(host-cmulti), , -objs) @@ -111,8 +128,8 @@ $(host-cobjs): $(obj)/%.o: $(src)/%.c FORCE quiet_cmd_host-cxxmulti = HOSTLD $@ cmd_host-cxxmulti = $(HOSTCXX) $(KBUILD_HOSTLDFLAGS) -o $@ \ $(foreach o,objs cxxobjs,\ - $(addprefix $(obj)/,$($(@F)-$(o)))) \ - $(KBUILD_HOSTLDLIBS) $(HOSTLDLIBS_$(@F)) + $(addprefix $(obj)/, $($(target-stem)-$(o)))) \ + $(KBUILD_HOSTLDLIBS) $(HOSTLDLIBS_$(target-stem)) $(host-cxxmulti): FORCE $(call if_changed,host-cxxmulti) $(call multi_depend, $(host-cxxmulti), , -objs -cxxobjs) @@ -144,8 +161,8 @@ $(host-cxxshobjs): $(obj)/%.o: $(src)/%.c FORCE # *.o -> .so shared library (host-cshlib) quiet_cmd_host-cshlib = HOSTLLD -shared $@ cmd_host-cshlib = $(HOSTCC) $(KBUILD_HOSTLDFLAGS) -shared -o $@ \ - $(addprefix $(obj)/,$($(@F:.so=-objs))) \ - $(KBUILD_HOSTLDLIBS) $(HOSTLDLIBS_$(@F)) + $(addprefix $(obj)/, $($(target-stem)-objs)) \ + $(KBUILD_HOSTLDLIBS) $(HOSTLDLIBS_$(target-stem).so) $(host-cshlib): FORCE $(call if_changed,host-cshlib) $(call multi_depend, $(host-cshlib), .so, -objs) @@ -154,8 +171,8 @@ $(call multi_depend, $(host-cshlib), .so, -objs) # *.o -> .so shared library (host-cxxshlib) quiet_cmd_host-cxxshlib = HOSTLLD -shared $@ cmd_host-cxxshlib = $(HOSTCXX) $(KBUILD_HOSTLDFLAGS) -shared -o $@ \ - $(addprefix $(obj)/,$($(@F:.so=-objs))) \ - $(KBUILD_HOSTLDLIBS) $(HOSTLDLIBS_$(@F)) + $(addprefix $(obj)/, $($(target-stem)-objs)) \ + $(KBUILD_HOSTLDLIBS) $(HOSTLDLIBS_$(target-stem).so) $(host-cxxshlib): FORCE $(call if_changed,host-cxxshlib) $(call multi_depend, $(host-cxxshlib), .so, -objs) diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 41c50f9461e51f..4a0cdd6f5909a7 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -41,9 +41,9 @@ obj-m := $(filter-out %/, $(obj-m)) # Subdirectories we need to descend into subdir-ym := $(sort $(subdir-y) $(subdir-m)) -# if $(foo-objs), $(foo-y), or $(foo-m) exists, foo.o is a composite object -multi-used-y := $(sort $(foreach m,$(obj-y), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))), $(m)))) -multi-used-m := $(sort $(foreach m,$(obj-m), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m))), $(m)))) +# If $(foo-objs), $(foo-y), $(foo-m), or $(foo-) exists, foo.o is a composite object +multi-used-y := $(sort $(foreach m,$(obj-y), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y)) 
$($(m:.o=-))), $(m)))) +multi-used-m := $(sort $(foreach m,$(obj-m), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m)) $($(m:.o=-))), $(m)))) multi-used := $(multi-used-y) $(multi-used-m) # $(subdir-obj-y) is the list of objects in $(obj-y) which uses dir/ to @@ -52,8 +52,8 @@ subdir-obj-y := $(filter %/built-in.a, $(obj-y)) # Replace multi-part objects by their individual parts, # including built-in.a from subdirectories -real-obj-y := $(foreach m, $(obj-y), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))),$($(m:.o=-objs)) $($(m:.o=-y)),$(m))) -real-obj-m := $(foreach m, $(obj-m), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m))),$($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m)),$(m))) +real-obj-y := $(foreach m, $(obj-y), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-))),$($(m:.o=-objs)) $($(m:.o=-y)),$(m))) +real-obj-m := $(foreach m, $(obj-m), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m)) $($(m:.o=-))),$($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m)),$(m))) # DTB # If CONFIG_OF_ALL_DTBS is enabled, all DT blobs are built @@ -101,6 +101,9 @@ modname-multi = $(subst $(space),:,$(sort $(foreach m,$(multi-used),\ modname = $(if $(modname-multi),$(modname-multi),$(basetarget)) +# target with $(obj)/ and its suffix stripped +target-stem = $(basename $(patsubst $(obj)/%,%,$@)) + # These flags are needed for modversions and compiling, so we define them here # $(modname_flags) defines KBUILD_MODNAME as the name of the module it will # end up in (or would, if it gets compiled in) @@ -109,12 +112,12 @@ basename_flags = -DKBUILD_BASENAME=$(call name-fix,$(basetarget)) modname_flags = -DKBUILD_MODNAME=$(call name-fix,$(modname)) orig_c_flags = $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) \ - $(ccflags-y) $(CFLAGS_$(basetarget).o) -_c_flags = $(filter-out $(CFLAGS_REMOVE_$(basetarget).o), $(orig_c_flags)) + $(ccflags-y) $(CFLAGS_$(target-stem).o) +_c_flags = $(filter-out $(CFLAGS_REMOVE_$(target-stem).o), $(orig_c_flags)) orig_a_flags = $(KBUILD_CPPFLAGS) $(KBUILD_AFLAGS) \ - $(asflags-y) $(AFLAGS_$(basetarget).o) -_a_flags = $(filter-out $(AFLAGS_REMOVE_$(basetarget).o), $(orig_a_flags)) -_cpp_flags = $(KBUILD_CPPFLAGS) $(cppflags-y) $(CPPFLAGS_$(@F)) + $(asflags-y) $(AFLAGS_$(target-stem).o) +_a_flags = $(filter-out $(AFLAGS_REMOVE_$(target-stem).o), $(orig_a_flags)) +_cpp_flags = $(KBUILD_CPPFLAGS) $(cppflags-y) $(CPPFLAGS_$(target-stem).lds) # # Enable gcov profiling flags for a file, directory or for all files depending @@ -159,6 +162,18 @@ _cpp_flags += -I $(srctree)/$(src) -I $(objtree)/$(obj) endif endif +part-of-module = $(if $(filter $(basename $@).o, $(real-obj-m)),y) +quiet_modtag = $(if $(part-of-module),[M], ) + +modkern_cflags = \ + $(if $(part-of-module), \ + $(KBUILD_CFLAGS_MODULE) $(CFLAGS_MODULE), \ + $(KBUILD_CFLAGS_KERNEL) $(CFLAGS_KERNEL)) + +modkern_aflags = $(if $(part-of-module), \ + $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE), \ + $(KBUILD_AFLAGS_KERNEL) $(AFLAGS_KERNEL)) + c_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \ -include $(srctree)/include/linux/compiler_types.h \ $(_c_flags) $(modkern_cflags) \ @@ -187,28 +202,6 @@ $(foreach m, $(notdir $1), \ $(addprefix $(obj)/, $(foreach s, $3, $($(m:%$(strip $2)=%$(s))))))) endef -# LEX -# --------------------------------------------------------------------------- -quiet_cmd_flex = LEX $@ - cmd_flex = $(LEX) -o$@ -L $< - -$(obj)/%.lex.c: $(src)/%.l FORCE - $(call if_changed,flex) - -# YACC -# --------------------------------------------------------------------------- -quiet_cmd_bison = YACC $@ - 
cmd_bison = $(YACC) -o$@ -t -l $< - -$(obj)/%.tab.c: $(src)/%.y FORCE - $(call if_changed,bison) - -quiet_cmd_bison_h = YACC $@ - cmd_bison_h = $(YACC) -o/dev/null --defines=$@ -t -l $< - -$(obj)/%.tab.h: $(src)/%.y FORCE - $(call if_changed,bison_h) - # Shipped files # =========================================================================== @@ -258,7 +251,7 @@ quiet_cmd_gzip = GZIP $@ DTC ?= $(objtree)/scripts/dtc/dtc # Disable noisy checks by default -ifeq ($(findstring 1,$(KBUILD_ENABLE_EXTRA_GCC_CHECKS)),) +ifeq ($(findstring 1,$(KBUILD_EXTRA_WARN)),) DTC_FLAGS += -Wno-unit_address_vs_reg \ -Wno-unit_address_format \ -Wno-avoid_unnecessary_addr_size \ @@ -269,7 +262,7 @@ DTC_FLAGS += -Wno-unit_address_vs_reg \ -Wno-pci_device_reg endif -ifneq ($(findstring 2,$(KBUILD_ENABLE_EXTRA_GCC_CHECKS)),) +ifneq ($(findstring 2,$(KBUILD_EXTRA_WARN)),) DTC_FLAGS += -Wnode_name_chars_strict \ -Wproperty_name_chars_strict endif @@ -374,7 +367,7 @@ UIMAGE_ENTRYADDR ?= $(UIMAGE_LOADADDR) UIMAGE_NAME ?= 'Linux-$(KERNELRELEASE)' quiet_cmd_uimage = UIMAGE $@ - cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(UIMAGE_ARCH) -O linux \ + cmd_uimage = $(BASH) $(MKIMAGE) -A $(UIMAGE_ARCH) -O linux \ -C $(UIMAGE_COMPRESSION) $(UIMAGE_OPTS-y) \ -T $(UIMAGE_TYPE) \ -a $(UIMAGE_LOADADDR) -e $(UIMAGE_ENTRYADDR) \ diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal new file mode 100644 index 00000000000000..411c1e600e7dd8 --- /dev/null +++ b/scripts/Makefile.modfinal @@ -0,0 +1,60 @@ +# SPDX-License-Identifier: GPL-2.0-only +# =========================================================================== +# Module final link +# =========================================================================== + +PHONY := __modfinal +__modfinal: + +include $(srctree)/scripts/Kbuild.include + +# for c_flags +include $(srctree)/scripts/Makefile.lib + +# find all modules listed in modules.order +modules := $(sort $(shell cat $(MODORDER))) + +__modfinal: $(modules) + @: + +# modname and part-of-module are set to make c_flags define proper module flags +modname = $(notdir $(@:.mod.o=)) +part-of-module = y + +quiet_cmd_cc_o_c = CC [M] $@ + cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $< + +%.mod.o: %.mod.c FORCE + $(call if_changed_dep,cc_o_c) + +ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink) + +quiet_cmd_ld_ko_o = LD [M] $@ + cmd_ld_ko_o = \ + $(LD) -r $(KBUILD_LDFLAGS) \ + $(KBUILD_LDFLAGS_MODULE) $(LDFLAGS_MODULE) \ + $(addprefix -T , $(KBUILD_LDS_MODULE)) \ + -o $@ $(filter %.o, $^); \ + $(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true) + +$(modules): %.ko: %.o %.mod.o $(KBUILD_LDS_MODULE) FORCE + +$(call if_changed,ld_ko_o) + +targets += $(modules) $(modules:.ko=.mod.o) + +# Add FORCE to the prequisites of a target to force it to be always rebuilt. +# --------------------------------------------------------------------------- + +PHONY += FORCE +FORCE: + +# Read all saved command lines and dependencies for the $(targets) we +# may be building above, using $(if_changed{,_dep}). As an +# optimization, we don't need to read them if the target does not +# exist, we will rebuild anyway in that case. 
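# Editorial aside, not part of the patch: for a hypothetical module
# drivers/foo/foo.ko, the saved-command file read back here would be
# drivers/foo/.foo.ko.cmd, holding a single assignment of the form
#
#   cmd_drivers/foo/foo.ko := ld -r -o drivers/foo/foo.ko drivers/foo/foo.o drivers/foo/foo.mod.o
#
# (paths illustrative, flags trimmed for brevity). $(if_changed,ld_ko_o)
# then re-links only when the command it is about to run differs from that
# saved line, or when a prerequisite is newer than the target.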
+ +existing-targets := $(wildcard $(sort $(targets))) + +-include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd) + +.PHONY: $(PHONY) diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost index 26e6574ecd08d4..9800a3988f23e7 100644 --- a/scripts/Makefile.modpost +++ b/scripts/Makefile.modpost @@ -15,15 +15,13 @@ # 2) modpost is then used to # 3) create one .mod.c file pr. module # 4) create one Module.symvers file with CRC for all exported symbols -# 5) compile all .mod.c files -# 6) final link of the module to a file # Step 3 is used to place certain information in the module's ELF # section, including information such as: # Version magic (see include/linux/vermagic.h for full details) # - Kernel release # - SMP is CONFIG_SMP -# - PREEMPT is CONFIG_PREEMPT +# - PREEMPT is CONFIG_PREEMPT[_RT] # - GCC Version # Module info # - Module version (MODULE_VERSION) @@ -60,13 +58,10 @@ MODPOST = scripts/mod/modpost \ ifdef MODPOST_VMLINUX -__modpost: vmlinux.o +quiet_cmd_modpost = MODPOST vmlinux.o + cmd_modpost = $(MODPOST) vmlinux.o -quiet_cmd_modpost = MODPOST $@ - cmd_modpost = $(MODPOST) $@ - -PHONY += vmlinux.o -vmlinux.o: +__modpost: $(call cmd,modpost) else @@ -83,73 +78,22 @@ include $(if $(wildcard $(KBUILD_EXTMOD)/Kbuild), \ $(KBUILD_EXTMOD)/Kbuild, $(KBUILD_EXTMOD)/Makefile) endif -include scripts/Makefile.lib +MODPOST += $(subst -i,-n,$(filter -i,$(MAKEFLAGS))) -s -T - $(wildcard vmlinux) # find all modules listed in modules.order modules := $(sort $(shell cat $(MODORDER))) -# Stop after building .o files if NOFINAL is set. Makes compile tests quicker -__modpost: $(if $(KBUILD_MODPOST_NOFINAL), $(modules:.ko:.o),$(modules)) - @: - -MODPOST += $(subst -i,-n,$(filter -i,$(MAKEFLAGS))) -s -T - $(wildcard vmlinux) - -# We can go over command line length here, so be careful. +# Read out modules.order instead of expanding $(modules) to pass in modpost. +# Otherwise, allmodconfig would fail with "Argument list too long". quiet_cmd_modpost = MODPOST $(words $(modules)) modules cmd_modpost = sed 's/ko$$/o/' $(MODORDER) | $(MODPOST) -PHONY += modules-modpost -modules-modpost: +__modpost: + @$(kecho) ' Building modules, stage 2.' $(call cmd,modpost) - -# Declare generated files as targets for modpost -$(modules:.ko=.mod.c): modules-modpost - -# Step 5), compile all *.mod.c files - -# modname is set to make c_flags define KBUILD_MODNAME -modname = $(notdir $(@:.mod.o=)) - -quiet_cmd_cc_o_c = CC $@ - cmd_cc_o_c = $(CC) $(c_flags) $(KBUILD_CFLAGS_MODULE) $(CFLAGS_MODULE) \ - -c -o $@ $< - -$(modules:.ko=.mod.o): %.mod.o: %.mod.c FORCE - $(call if_changed_dep,cc_o_c) - -targets += $(modules:.ko=.mod.o) - -ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink) - -# Step 6), final link of the modules with optional arch pass after final link -quiet_cmd_ld_ko_o = LD [M] $@ - cmd_ld_ko_o = \ - $(LD) -r $(KBUILD_LDFLAGS) \ - $(KBUILD_LDFLAGS_MODULE) $(LDFLAGS_MODULE) \ - -o $@ $(real-prereqs) ; \ - $(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true) - -$(modules): %.ko :%.o %.mod.o FORCE - +$(call if_changed,ld_ko_o) - -targets += $(modules) - - -# Add FORCE to the prequisites of a target to force it to be always rebuilt. -# --------------------------------------------------------------------------- - -PHONY += FORCE - -FORCE: - -# Read all saved command lines and dependencies for the $(targets) we -# may be building above, using $(if_changed{,_dep}). 
As an -# optimization, we don't need to read them if the target does not -# exist, we will rebuild anyway in that case. - -existing-targets := $(wildcard $(sort $(targets))) - --include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd) +ifneq ($(KBUILD_MODPOST_NOFINAL),1) + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modfinal +endif endif diff --git a/scripts/package/Makefile b/scripts/Makefile.package similarity index 92% rename from scripts/package/Makefile rename to scripts/Makefile.package index ca7f46b562a4ff..56eadcc48d46de 100644 --- a/scripts/package/Makefile +++ b/scripts/Makefile.package @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only # Makefile for the different targets used to generate full packages of a kernel -# It uses the generic clean infrastructure of kbuild + +include $(srctree)/scripts/Kbuild.include # RPM target # --------------------------------------------------------------------------- @@ -50,7 +51,8 @@ rm -f $(objtree)/.scmversion # rpm-pkg # --------------------------------------------------------------------------- -rpm-pkg: FORCE +PHONY += rpm-pkg +rpm-pkg: $(MAKE) clean $(CONFIG_SHELL) $(MKSPEC) >$(objtree)/kernel.spec $(call cmd,src_tar,$(KERNELPATH),kernel.spec) @@ -59,15 +61,15 @@ rpm-pkg: FORCE # binrpm-pkg # --------------------------------------------------------------------------- -binrpm-pkg: FORCE +PHONY += binrpm-pkg +binrpm-pkg: $(MAKE) -f $(srctree)/Makefile $(CONFIG_SHELL) $(MKSPEC) prebuilt > $(objtree)/binkernel.spec +rpmbuild $(RPMOPTS) --define "_builddir $(objtree)" --target \ $(UTS_MACHINE) -bb $(objtree)/binkernel.spec -clean-files += $(objtree)/*.spec - -deb-pkg: FORCE +PHONY += deb-pkg +deb-pkg: $(MAKE) clean $(CONFIG_SHELL) $(srctree)/scripts/package/mkdebian $(call cmd,src_tar,$(KDEB_SOURCENAME)) @@ -75,18 +77,19 @@ deb-pkg: FORCE mv $(KDEB_SOURCENAME).tar.gz ../$(KDEB_SOURCENAME)_$${origversion}.orig.tar.gz +dpkg-buildpackage -r$(KBUILD_PKG_ROOTCMD) -a$$(cat debian/arch) $(DPKG_FLAGS) -i.git -us -uc -bindeb-pkg: FORCE +PHONY += bindeb-pkg +bindeb-pkg: $(CONFIG_SHELL) $(srctree)/scripts/package/mkdebian +dpkg-buildpackage -r$(KBUILD_PKG_ROOTCMD) -a$$(cat debian/arch) $(DPKG_FLAGS) -b -nc -uc -intdeb-pkg: FORCE +PHONY += intdeb-pkg +intdeb-pkg: +$(CONFIG_SHELL) $(srctree)/scripts/package/builddeb -clean-dirs += $(objtree)/debian/ - # snap-pkg # --------------------------------------------------------------------------- -snap-pkg: FORCE +PHONY += snap-pkg +snap-pkg: rm -rf $(objtree)/snap mkdir $(objtree)/snap $(MAKE) clean @@ -98,17 +101,14 @@ snap-pkg: FORCE cd $(objtree)/snap && \ snapcraft --target-arch=$(UTS_MACHINE) -clean-dirs += $(objtree)/snap/ - # tarball targets # --------------------------------------------------------------------------- -tar%pkg: FORCE +tar-pkgs := tar-pkg targz-pkg tarbz2-pkg tarxz-pkg +PHONY += $(tar-pkgs) +$(tar-pkgs): $(MAKE) -f $(srctree)/Makefile +$(CONFIG_SHELL) $(srctree)/scripts/package/buildtar $@ -clean-dirs += $(objtree)/tar-install/ - - # perf-pkg - generate a source tarball with perf source # --------------------------------------------------------------------------- @@ -133,12 +133,15 @@ $(if $(findstring xz,$@),xz, \ $(error unknown target $@)))) \ -f -9 $(perf-tar).tar) -perf-%pkg: FORCE +perf-tar-pkgs := perf-tar-src-pkg perf-targz-src-pkg perf-tarbz2-src-pkg perf-tarxz-src-pkg +PHONY += $(perf-tar-pkgs) +$(perf-tar-pkgs): $(call cmd,perf_tar) # Help text displayed when executing 'make help' # --------------------------------------------------------------------------- 
-help: FORCE +PHONY += help +help: @echo ' rpm-pkg - Build both source and binary RPM kernel packages' @echo ' binrpm-pkg - Build only the binary kernel RPM package' @echo ' deb-pkg - Build both source and binary deb kernel packages' @@ -152,3 +155,5 @@ help: FORCE @echo ' perf-targz-src-pkg - Build $(perf-tar).tar.gz source tarball' @echo ' perf-tarbz2-src-pkg - Build $(perf-tar).tar.bz2 source tarball' @echo ' perf-tarxz-src-pkg - Build $(perf-tar).tar.xz source tarball' + +.PHONY: $(PHONY) diff --git a/scripts/basic/Makefile b/scripts/basic/Makefile index 548aeb59280662..7c9cb80d097b42 100644 --- a/scripts/basic/Makefile +++ b/scripts/basic/Makefile @@ -1,16 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only -### -# This Makefile lists the most basic programs used during the build process. -# The programs listed herein are what are needed to do the basic stuff, -# such as fix file dependencies. -# This initial step is needed to avoid files to be recompiled -# when kernel configuration changes (which is what happens when -# .config is included by main Makefile. -# --------------------------------------------------------------------------- -# fixdep: Used to generate dependency information during build process +# +# fixdep: used to generate dependency information during build process hostprogs-y := fixdep always := $(hostprogs-y) - -# fixdep is needed to compile other host programs -$(addprefix $(obj)/,$(filter-out fixdep,$(always))): $(obj)/fixdep diff --git a/scripts/genksyms/Makefile b/scripts/genksyms/Makefile index 66c314bc5933ed..78629f515e782f 100644 --- a/scripts/genksyms/Makefile +++ b/scripts/genksyms/Makefile @@ -12,22 +12,15 @@ genksyms-objs := genksyms.o parse.tab.o lex.lex.o # # Just in case, run "$(YACC) --version" without suppressing stderr # so that 'bison: not found' will be displayed if it is missing. -ifeq ($(findstring 1,$(KBUILD_ENABLE_EXTRA_GCC_CHECKS)),) +ifeq ($(findstring 1,$(KBUILD_EXTRA_WARN)),) quiet_cmd_bison_no_warn = $(quiet_cmd_bison) cmd_bison_no_warn = $(YACC) --version >/dev/null; \ $(cmd_bison) 2>/dev/null -$(obj)/parse.tab.c: $(src)/parse.y FORCE +$(obj)/pars%.tab.c $(obj)/pars%.tab.h: $(src)/pars%.y FORCE $(call if_changed,bison_no_warn) -quiet_cmd_bison_h_no_warn = $(quiet_cmd_bison_h) - cmd_bison_h_no_warn = $(YACC) --version >/dev/null; \ - $(cmd_bison_h) 2>/dev/null - -$(obj)/parse.tab.h: $(src)/parse.y FORCE - $(call if_changed,bison_h_no_warn) - endif # -I needed for generated C source (shipped source) diff --git a/scripts/genksyms/keywords.c b/scripts/genksyms/keywords.c index c586d32dd2c358..7a85c4e2117509 100644 --- a/scripts/genksyms/keywords.c +++ b/scripts/genksyms/keywords.c @@ -3,11 +3,7 @@ static struct resword { const char *name; int token; } keywords[] = { - { "EXPORT_SYMBOL", EXPORT_SYMBOL_KEYW }, - { "EXPORT_SYMBOL_GPL", EXPORT_SYMBOL_KEYW }, - { "EXPORT_SYMBOL_GPL_FUTURE", EXPORT_SYMBOL_KEYW }, - { "EXPORT_UNUSED_SYMBOL", EXPORT_SYMBOL_KEYW }, - { "EXPORT_UNUSED_SYMBOL_GPL", EXPORT_SYMBOL_KEYW }, + { "__GENKSYMS_EXPORT_SYMBOL", EXPORT_SYMBOL_KEYW }, { "__asm", ASM_KEYW }, { "__asm__", ASM_KEYW }, { "__attribute", ATTRIBUTE_KEYW }, diff --git a/scripts/genksyms/lex.l b/scripts/genksyms/lex.l index d29c774f51b615..e265c5d9686122 100644 --- a/scripts/genksyms/lex.l +++ b/scripts/genksyms/lex.l @@ -1,25 +1,13 @@ -/* Lexical analysis for genksyms. - Copyright 1996, 1997 Linux International. - - New implementation contributed by Richard Henderson - Based on original work by Bjorn Ekwall - - Taken from Linux modutils 2.4.22. 
- - This program is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by the - Free Software Foundation; either version 2 of the License, or (at your - option) any later version. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software Foundation, - Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ - +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Lexical analysis for genksyms. + * Copyright 1996, 1997 Linux International. + * + * New implementation contributed by Richard Henderson + * Based on original work by Bjorn Ekwall + * + * Taken from Linux modutils 2.4.22. + */ %{ diff --git a/scripts/genksyms/parse.y b/scripts/genksyms/parse.y index 1ebcf52cd0f9e0..e22b42245bcc23 100644 --- a/scripts/genksyms/parse.y +++ b/scripts/genksyms/parse.y @@ -1,25 +1,13 @@ -/* C global declaration parser for genksyms. - Copyright 1996, 1997 Linux International. - - New implementation contributed by Richard Henderson - Based on original work by Bjorn Ekwall - - This file is part of the Linux modutils. - - This program is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by the - Free Software Foundation; either version 2 of the License, or (at your - option) any later version. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software Foundation, - Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ - +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * C global declaration parser for genksyms. + * Copyright 1996, 1997 Linux International. + * + * New implementation contributed by Richard Henderson + * Based on original work by Bjorn Ekwall + * + * This file is part of the Linux modutils. + */ %{ diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh index bbaf293869956f..a07668a5c36b1c 100755 --- a/scripts/headers_install.sh +++ b/scripts/headers_install.sh @@ -41,5 +41,77 @@ sed -E -e ' scripts/unifdef -U__KERNEL__ -D__EXPORTED_HEADERS__ $TMPFILE > $OUTFILE [ $? -gt 1 ] && exit 1 +# Remove /* ... */ style comments, and find CONFIG_ references in code +configs=$(sed -e ' +:comment + s:/\*[^*][^*]*:/*: + s:/\*\*\**\([^/]\):/*\1: + t comment + s:/\*\*/: : + t comment + /\/\*/! b check + N + b comment +:print + P + D +:check + s:^\(CONFIG_[[:alnum:]_]*\):\1\n: + t print + s:^[[:alnum:]_][[:alnum:]_]*:: + s:^[^[:alnum:]_][^[:alnum:]_]*:: + t check + d +' $OUTFILE) + +# The entries in the following list are not warned. +# Please do not add a new entry. This list is only for existing ones. +# The list will be reduced gradually, and deleted eventually. (hopefully) +# +# The format is : in each line. 
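# Editorial aside, not part of the patch: stripped of the ignore list that
# follows and of the comment-stripping sed program above, the new warning
# amounts to roughly this self-contained fragment (header path and CONFIG
# name invented, and a plain grep stands in for the sed script):
printf '#ifdef CONFIG_EXAMPLE_LEAK\nint x;\n#endif\n' > /tmp/example.h
for c in $(grep -o 'CONFIG_[A-Za-z0-9_]*' /tmp/example.h | sort -u); do
	echo "warning: /tmp/example.h: leak $c to user-space" >&2
done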
+config_leak_ignores=" +arch/alpha/include/uapi/asm/setup.h:CONFIG_ALPHA_LEGACY_START_ADDRESS +arch/arc/include/uapi/asm/page.h:CONFIG_ARC_PAGE_SIZE_16K +arch/arc/include/uapi/asm/page.h:CONFIG_ARC_PAGE_SIZE_4K +arch/arc/include/uapi/asm/swab.h:CONFIG_ARC_HAS_SWAPE +arch/arm/include/uapi/asm/ptrace.h:CONFIG_CPU_ENDIAN_BE8 +arch/hexagon/include/uapi/asm/ptrace.h:CONFIG_HEXAGON_ARCH_VERSION +arch/hexagon/include/uapi/asm/user.h:CONFIG_HEXAGON_ARCH_VERSION +arch/ia64/include/uapi/asm/cmpxchg.h:CONFIG_IA64_DEBUG_CMPXCHG +arch/m68k/include/uapi/asm/ptrace.h:CONFIG_COLDFIRE +arch/nios2/include/uapi/asm/swab.h:CONFIG_NIOS2_CI_SWAB_NO +arch/nios2/include/uapi/asm/swab.h:CONFIG_NIOS2_CI_SWAB_SUPPORT +arch/sh/include/uapi/asm/ptrace.h:CONFIG_CPU_SH5 +arch/sh/include/uapi/asm/sigcontext.h:CONFIG_CPU_SH5 +arch/sh/include/uapi/asm/stat.h:CONFIG_CPU_SH5 +arch/x86/include/uapi/asm/auxvec.h:CONFIG_IA32_EMULATION +arch/x86/include/uapi/asm/auxvec.h:CONFIG_X86_64 +arch/x86/include/uapi/asm/mman.h:CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS +include/uapi/asm-generic/fcntl.h:CONFIG_64BIT +include/uapi/linux/atmdev.h:CONFIG_COMPAT +include/uapi/linux/elfcore.h:CONFIG_BINFMT_ELF_FDPIC +include/uapi/linux/eventpoll.h:CONFIG_PM_SLEEP +include/uapi/linux/hw_breakpoint.h:CONFIG_HAVE_MIXED_BREAKPOINTS_REGS +include/uapi/linux/pktcdvd.h:CONFIG_CDROM_PKTCDVD_WCACHE +include/uapi/linux/raw.h:CONFIG_MAX_RAW_DEVS +" + +for c in $configs +do + warn=1 + + for ignore in $config_leak_ignores + do + if echo "$INFILE:$c" | grep -q "$ignore$"; then + warn= + break + fi + done + + if [ "$warn" = 1 ]; then + echo "warning: $INFILE: leak $c to user-space" >&2 + fi +done + rm -f $TMPFILE trap - EXIT diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile index 7656e1137b6be5..ef2f2336c46960 100644 --- a/scripts/kconfig/Makefile +++ b/scripts/kconfig/Makefile @@ -114,7 +114,7 @@ testconfig: $(obj)/conf $(PYTHON3) -B -m pytest $(srctree)/$(src)/tests \ -o cache_dir=$(abspath $(obj)/tests/.cache) \ $(if $(findstring 1,$(KBUILD_VERBOSE)),--capture=no) -clean-dirs += tests/.cache +clean-files += tests/.cache # Help text used by make help help: @@ -166,15 +166,15 @@ $(obj)/nconf.o $(obj)/nconf.gui.o: $(obj)/nconf-cfg # mconf: Used for the menuconfig target based on lxdialog hostprogs-y += mconf -lxdialog := checklist.o inputbox.o menubox.o textbox.o util.o yesno.o -mconf-objs := mconf.o $(addprefix lxdialog/, $(lxdialog)) $(common-objs) +lxdialog := $(addprefix lxdialog/, \ + checklist.o inputbox.o menubox.o textbox.o util.o yesno.o) +mconf-objs := mconf.o $(lxdialog) $(common-objs) HOSTLDLIBS_mconf = $(shell . $(obj)/mconf-cfg && echo $$libs) $(foreach f, mconf.o $(lxdialog), \ $(eval HOSTCFLAGS_$f = $$(shell . $(obj)/mconf-cfg && echo $$$$cflags))) -$(obj)/mconf.o: $(obj)/mconf-cfg -$(addprefix $(obj)/lxdialog/, $(lxdialog)): $(obj)/mconf-cfg +$(addprefix $(obj)/, mconf.o $(lxdialog)): $(obj)/mconf-cfg # qconf: Used for the xconfig target based on Qt hostprogs-y += qconf diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh index d924c51d28b7b8..63c8565206a451 100755 --- a/scripts/kconfig/merge_config.sh +++ b/scripts/kconfig/merge_config.sh @@ -13,12 +13,12 @@ # Copyright (c) 2009-2010 Wind River Systems, Inc. 
# Copyright 2011 Linaro +set -e + clean_up() { rm -f $TMP_FILE rm -f $MERGE_FILE - exit } -trap clean_up HUP INT TERM usage() { echo "Usage: $0 [OPTIONS] [CONFIG [...]]" @@ -110,6 +110,9 @@ TMP_FILE=$(mktemp ./.tmp.config.XXXXXXXXXX) MERGE_FILE=$(mktemp ./.merge_tmp.config.XXXXXXXXXX) echo "Using $INITFILE as base" + +trap clean_up EXIT + cat $INITFILE > $TMP_FILE # Merge files, printing warnings on overridden values @@ -155,7 +158,6 @@ if [ "$RUNMAKE" = "false" ]; then echo "#" echo "# merged configuration written to $KCONFIG_CONFIG (needs make)" echo "#" - clean_up exit fi @@ -177,7 +179,7 @@ make KCONFIG_ALLCONFIG=$TMP_FILE $OUTPUT_ARG $ALLTARGET for CFG in $(sed -n -e "$SED_CONFIG_EXP1" -e "$SED_CONFIG_EXP2" $TMP_FILE); do REQUESTED_VAL=$(grep -w -e "$CFG" $TMP_FILE) - ACTUAL_VAL=$(grep -w -e "$CFG" "$KCONFIG_CONFIG") + ACTUAL_VAL=$(grep -w -e "$CFG" "$KCONFIG_CONFIG" || true) if [ "x$REQUESTED_VAL" != "x$ACTUAL_VAL" ] ; then echo "Value requested for $CFG not in final .config" echo "Requested value: $REQUESTED_VAL" @@ -185,5 +187,3 @@ for CFG in $(sed -n -e "$SED_CONFIG_EXP1" -e "$SED_CONFIG_EXP2" $TMP_FILE); do echo "" fi done - -clean_up diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index 8c59970a09dce2..06495379fcd884 100755 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh @@ -64,6 +64,8 @@ vmlinux_link() local output=${1} local objects + info LD ${output} + # skip output file argument shift @@ -157,6 +159,18 @@ kallsyms() ${CC} ${aflags} -c -o ${2} ${afile} } +# Perform one step in kallsyms generation, including temporary linking of +# vmlinux. +kallsyms_step() +{ + kallsymso_prev=${kallsymso} + kallsymso=.tmp_kallsyms${1}.o + kallsyms_vmlinux=.tmp_vmlinux${1} + + vmlinux_link ${kallsyms_vmlinux} "${kallsymso_prev}" ${btf_vmlinux_bin_o} + kallsyms ${kallsyms_vmlinux} ${kallsymso} +} + # Create map file with all symbols from ${1} # See mksymap for additional details mksysmap() @@ -243,6 +257,7 @@ if [ -n "${CONFIG_DEBUG_INFO_BTF}" ]; then fi kallsymso="" +kallsymso_prev="" kallsyms_vmlinux="" if [ -n "${CONFIG_KALLSYMS}" ]; then @@ -269,32 +284,19 @@ if [ -n "${CONFIG_KALLSYMS}" ]; then # a) Verify that the System.map from vmlinux matches the map from # ${kallsymso}. 
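# Editorial sketch, not part of the patch: each kallsyms_step N call added
# below expands to the same pair of commands the removed lines spelled out
# by hand, i.e. roughly
#
#   kallsyms_step 1   ->  vmlinux_link .tmp_vmlinux1 ""               ${btf_vmlinux_bin_o}
#                         kallsyms     .tmp_vmlinux1 .tmp_kallsyms1.o
#   kallsyms_step 2   ->  vmlinux_link .tmp_vmlinux2 .tmp_kallsyms1.o ${btf_vmlinux_bin_o}
#                         kallsyms     .tmp_vmlinux2 .tmp_kallsyms2.o
#
# with step 3 run only when the two kallsyms object sizes differ or
# KALLSYMS_EXTRA_PASS is set, exactly as before the refactor.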
- kallsymso=.tmp_kallsyms2.o - kallsyms_vmlinux=.tmp_vmlinux2 - - # step 1 - vmlinux_link .tmp_vmlinux1 ${btf_vmlinux_bin_o} - kallsyms .tmp_vmlinux1 .tmp_kallsyms1.o - - # step 2 - vmlinux_link .tmp_vmlinux2 .tmp_kallsyms1.o ${btf_vmlinux_bin_o} - kallsyms .tmp_vmlinux2 .tmp_kallsyms2.o + kallsyms_step 1 + kallsyms_step 2 # step 3 - size1=$(${CONFIG_SHELL} "${srctree}/scripts/file-size.sh" .tmp_kallsyms1.o) - size2=$(${CONFIG_SHELL} "${srctree}/scripts/file-size.sh" .tmp_kallsyms2.o) + size1=$(${CONFIG_SHELL} "${srctree}/scripts/file-size.sh" ${kallsymso_prev}) + size2=$(${CONFIG_SHELL} "${srctree}/scripts/file-size.sh" ${kallsymso}) if [ $size1 -ne $size2 ] || [ -n "${KALLSYMS_EXTRA_PASS}" ]; then - kallsymso=.tmp_kallsyms3.o - kallsyms_vmlinux=.tmp_vmlinux3 - - vmlinux_link .tmp_vmlinux3 .tmp_kallsyms2.o ${btf_vmlinux_bin_o} - kallsyms .tmp_vmlinux3 .tmp_kallsyms3.o + kallsyms_step 3 fi fi -info LD vmlinux -vmlinux_link vmlinux "${kallsymso}" "${btf_vmlinux_bin_o}" +vmlinux_link vmlinux "${kallsymso}" ${btf_vmlinux_bin_o} if [ -n "${CONFIG_BUILDTIME_EXTABLE_SORT}" ]; then info SORTEX vmlinux diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h index 2339f86126cb8f..d1d757c6edf4f6 100755 --- a/scripts/mkcompile_h +++ b/scripts/mkcompile_h @@ -5,7 +5,8 @@ TARGET=$1 ARCH=$2 SMP=$3 PREEMPT=$4 -CC=$5 +PREEMPT_RT=$5 +CC=$6 vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; } @@ -53,6 +54,7 @@ UTS_VERSION="#$VERSION" CONFIG_FLAGS="" if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi +if [ -n "$PREEMPT_RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT_RT"; fi UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP" # Truncate to maximum length diff --git a/scripts/mkmakefile b/scripts/mkmakefile index 4d0faebb171997..1cb17475142911 100755 --- a/scripts/mkmakefile +++ b/scripts/mkmakefile @@ -12,6 +12,6 @@ if [ "${quiet}" != "silent_" ]; then fi cat << EOF > Makefile -# Automatically generated by $(realpath $0): don't edit -include $(realpath $1/Makefile) +# Automatically generated by $0: don't edit +include $1/Makefile EOF diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index e17a29ae2e97ad..c91eba751804bd 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -34,6 +34,11 @@ typedef Elf64_Addr kernel_ulong_t; typedef uint32_t __u32; typedef uint16_t __u16; typedef unsigned char __u8; +typedef struct { + __u8 b[16]; +} guid_t; + +/* backwards compatibility, don't use in new code */ typedef struct { __u8 b[16]; } uuid_le; diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index f277e116e0ebf6..820eed87fb43bf 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -169,6 +169,7 @@ struct symbol { unsigned int kernel:1; /* 1 if symbol is from kernel * (only for external modules) **/ unsigned int preloaded:1; /* 1 if symbol from Module.symvers, or crc */ + unsigned int is_static:1; /* 1 if symbol is not global */ enum export export; /* Type of export */ char name[0]; }; @@ -201,6 +202,7 @@ static struct symbol *alloc_symbol(const char *name, unsigned int weak, strcpy(s->name, name); s->weak = weak; s->next = next; + s->is_static = 1; return s; } @@ -795,9 +797,9 @@ static int match(const char *sym, const char * const pat[]) /* "*foo*" */ if (*p == '*' && *endp == '*') { - char *here, *bare = strndup(p + 1, strlen(p) - 2); + char *bare = NOFAIL(strndup(p + 1, strlen(p) - 2)); + char *here = strstr(sym, bare); - here = strstr(sym, bare); free(bare); if (here != NULL) return 1; @@ -1980,6 
+1982,21 @@ static void read_symbols(const char *modname) handle_modversions(mod, &info, sym, symname); handle_moddevtable(mod, &info, sym, symname); } + + // check for static EXPORT_SYMBOL_* functions && global vars + for (sym = info.symtab_start; sym < info.symtab_stop; sym++) { + unsigned char bind = ELF_ST_BIND(sym->st_info); + + if (bind == STB_GLOBAL || bind == STB_WEAK) { + struct symbol *s = + find_symbol(remove_dot(info.strtab + + sym->st_name)); + + if (s) + s->is_static = 0; + } + } + if (!is_vmlinux(modname) || vmlinux_section_warnings) check_sec_ref(mod, modname, &info); @@ -2159,7 +2176,7 @@ static void add_header(struct buffer *b, struct module *mod) buf_printf(b, "MODULE_INFO(name, KBUILD_MODNAME);\n"); buf_printf(b, "\n"); buf_printf(b, "__visible struct module __this_module\n"); - buf_printf(b, "__attribute__((section(\".gnu.linkonce.this_module\"))) = {\n"); + buf_printf(b, "__section(.gnu.linkonce.this_module) = {\n"); buf_printf(b, "\t.name = KBUILD_MODNAME,\n"); if (mod->has_init) buf_printf(b, "\t.init = init_module,\n"); @@ -2213,8 +2230,7 @@ static int add_versions(struct buffer *b, struct module *mod) buf_printf(b, "\n"); buf_printf(b, "static const struct modversion_info ____versions[]\n"); - buf_printf(b, "__used\n"); - buf_printf(b, "__attribute__((section(\"__versions\"))) = {\n"); + buf_printf(b, "__used __section(__versions) = {\n"); for (s = mod->unres; s; s = s->next) { if (!s->module) @@ -2250,10 +2266,7 @@ static void add_depends(struct buffer *b, struct module *mod) s->module->seen = is_vmlinux(s->module->name); buf_printf(b, "\n"); - buf_printf(b, "static const char __module_depends[]\n"); - buf_printf(b, "__used\n"); - buf_printf(b, "__attribute__((section(\".modinfo\"))) =\n"); - buf_printf(b, "\"depends="); + buf_printf(b, "MODULE_INFO(depends, \""); for (s = mod->unres; s; s = s->next) { const char *p; if (!s->module) @@ -2271,7 +2284,7 @@ static void add_depends(struct buffer *b, struct module *mod) buf_printf(b, "%s%s", first ? "" : ",", p); first = 0; } - buf_printf(b, "\";\n"); + buf_printf(b, "\");\n"); } static void add_srcversion(struct buffer *b, struct module *mod) @@ -2369,6 +2382,7 @@ static void read_dump(const char *fname, unsigned int kernel) s = sym_add_exported(symname, mod, export_no(export)); s->kernel = kernel; s->preloaded = 1; + s->is_static = 0; sym_update_crc(symname, mod, crc, export_no(export)); } release_file(file, size); @@ -2425,6 +2439,7 @@ int main(int argc, char **argv) char *dump_write = NULL, *files_source = NULL; int opt; int err; + int n; struct ext_sym_list *extsym_iter; struct ext_sym_list *extsym_start = NULL; @@ -2520,6 +2535,19 @@ int main(int argc, char **argv) if (sec_mismatch_count && sec_mismatch_fatal) fatal("modpost: Section mismatches detected.\n" "Set CONFIG_SECTION_MISMATCH_WARN_ONLY=y to allow them.\n"); + for (n = 0; n < SYMBOL_HASH_SIZE; n++) { + struct symbol *s = symbolhash[n]; + + while (s) { + if (s->is_static) + warn("\"%s\" [%s] is a static %s\n", + s->name, s->module->name, + export_str(s->export)); + + s = s->next; + } + } + free(buf.p); return err; diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c index 8387a9bc064a59..612268eabef463 100644 --- a/scripts/recordmcount.c +++ b/scripts/recordmcount.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include @@ -43,56 +42,37 @@ static int fd_map; /* File descriptor for file being modified. */ static int mmap_failed; /* Boolean flag. 
*/ static char gpfx; /* prefix for global symbol name (sometimes '_') */ static struct stat sb; /* Remember .st_size, etc. */ -static jmp_buf jmpenv; /* setjmp/longjmp per-file error escape */ static const char *altmcount; /* alternate mcount symbol name */ static int warn_on_notrace_sect; /* warn when section has mcount not being recorded */ static void *file_map; /* pointer of the mapped file */ static void *file_end; /* pointer to the end of the mapped file */ static int file_updated; /* flag to state file was changed */ static void *file_ptr; /* current file pointer location */ + static void *file_append; /* added to the end of the file */ static size_t file_append_size; /* how much is added to end of file */ -/* setjmp() return values */ -enum { - SJ_SETJMP = 0, /* hardwired first return */ - SJ_FAIL, - SJ_SUCCEED -}; - /* Per-file resource cleanup when multiple files. */ -static void -cleanup(void) +static void file_append_cleanup(void) { - if (!mmap_failed) - munmap(file_map, sb.st_size); - else - free(file_map); - file_map = NULL; free(file_append); file_append = NULL; file_append_size = 0; file_updated = 0; } -static void __attribute__((noreturn)) -fail_file(void) +static void mmap_cleanup(void) { - cleanup(); - longjmp(jmpenv, SJ_FAIL); -} - -static void __attribute__((noreturn)) -succeed_file(void) -{ - cleanup(); - longjmp(jmpenv, SJ_SUCCEED); + if (!mmap_failed) + munmap(file_map, sb.st_size); + else + free(file_map); + file_map = NULL; } -/* ulseek, uread, ...: Check return value for errors. */ +/* ulseek, uwrite, ...: Check return value for errors. */ -static off_t -ulseek(int const fd, off_t const offset, int const whence) +static off_t ulseek(off_t const offset, int const whence) { switch (whence) { case SEEK_SET: @@ -107,24 +87,12 @@ ulseek(int const fd, off_t const offset, int const whence) } if (file_ptr < file_map) { fprintf(stderr, "lseek: seek before file\n"); - fail_file(); + return -1; } return file_ptr - file_map; } -static size_t -uread(int const fd, void *const buf, size_t const count) -{ - size_t const n = read(fd, buf, count); - if (n != count) { - perror("read"); - fail_file(); - } - return n; -} - -static size_t -uwrite(int const fd, void const *const buf, size_t const count) +static ssize_t uwrite(void const *const buf, size_t const count) { size_t cnt = count; off_t idx = 0; @@ -140,7 +108,9 @@ uwrite(int const fd, void const *const buf, size_t const count) } if (!file_append) { perror("write"); - fail_file(); + file_append_cleanup(); + mmap_cleanup(); + return -1; } if (file_ptr < file_end) { cnt = file_end - file_ptr; @@ -160,17 +130,81 @@ uwrite(int const fd, void const *const buf, size_t const count) return count; } -static void * -umalloc(size_t size) +static void * umalloc(size_t size) { void *const addr = malloc(size); if (addr == 0) { fprintf(stderr, "malloc failed: %zu bytes\n", size); - fail_file(); + file_append_cleanup(); + mmap_cleanup(); + return NULL; } return addr; } +/* + * Get the whole file as a programming convenience in order to avoid + * malloc+lseek+read+free of many pieces. If successful, then mmap + * avoids copying unused pieces; else just read the whole file. + * Open for both read and write; new info will be appended to the file. + * Use MAP_PRIVATE so that a few changes to the in-memory ElfXX_Ehdr + * do not propagate to the file until an explicit overwrite at the last. + * This preserves most aspects of consistency (all except .st_size) + * for simultaneous readers of the file while we are appending to it. 
+ * However, multiple writers still are bad. We choose not to use + * locking because it is expensive and the use case of kernel build + * makes multiple writers unlikely. + */ +static void *mmap_file(char const *fname) +{ + /* Avoid problems if early cleanup() */ + fd_map = -1; + mmap_failed = 1; + file_map = NULL; + file_ptr = NULL; + file_updated = 0; + sb.st_size = 0; + + fd_map = open(fname, O_RDONLY); + if (fd_map < 0) { + perror(fname); + return NULL; + } + if (fstat(fd_map, &sb) < 0) { + perror(fname); + goto out; + } + if (!S_ISREG(sb.st_mode)) { + fprintf(stderr, "not a regular file: %s\n", fname); + goto out; + } + file_map = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_PRIVATE, + fd_map, 0); + if (file_map == MAP_FAILED) { + mmap_failed = 1; + file_map = umalloc(sb.st_size); + if (!file_map) { + perror(fname); + goto out; + } + if (read(fd_map, file_map, sb.st_size) != sb.st_size) { + perror(fname); + free(file_map); + file_map = NULL; + goto out; + } + } else + mmap_failed = 0; +out: + close(fd_map); + fd_map = -1; + + file_end = file_map + sb.st_size; + + return file_map; +} + + static unsigned char ideal_nop5_x86_64[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 }; static unsigned char ideal_nop5_x86_32[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 }; static unsigned char *ideal_nop; @@ -194,8 +228,10 @@ static int make_nop_x86(void *map, size_t const offset) return -1; /* convert to nop */ - ulseek(fd_map, offset - 1, SEEK_SET); - uwrite(fd_map, ideal_nop, 5); + if (ulseek(offset - 1, SEEK_SET) < 0) + return -1; + if (uwrite(ideal_nop, 5) < 0) + return -1; return 0; } @@ -243,10 +279,12 @@ static int make_nop_arm(void *map, size_t const offset) return -1; /* Convert to nop */ - ulseek(fd_map, off, SEEK_SET); + if (ulseek(off, SEEK_SET) < 0) + return -1; do { - uwrite(fd_map, ideal_nop, nop_size); + if (uwrite(ideal_nop, nop_size) < 0) + return -1; } while (--cnt > 0); return 0; @@ -263,57 +301,20 @@ static int make_nop_arm64(void *map, size_t const offset) return -1; /* Convert to nop */ - ulseek(fd_map, offset, SEEK_SET); - uwrite(fd_map, ideal_nop, 4); + if (ulseek(offset, SEEK_SET) < 0) + return -1; + if (uwrite(ideal_nop, 4) < 0) + return -1; return 0; } -/* - * Get the whole file as a programming convenience in order to avoid - * malloc+lseek+read+free of many pieces. If successful, then mmap - * avoids copying unused pieces; else just read the whole file. - * Open for both read and write; new info will be appended to the file. - * Use MAP_PRIVATE so that a few changes to the in-memory ElfXX_Ehdr - * do not propagate to the file until an explicit overwrite at the last. - * This preserves most aspects of consistency (all except .st_size) - * for simultaneous readers of the file while we are appending to it. - * However, multiple writers still are bad. We choose not to use - * locking because it is expensive and the use case of kernel build - * makes multiple writers unlikely. 
- */ -static void *mmap_file(char const *fname) -{ - fd_map = open(fname, O_RDONLY); - if (fd_map < 0 || fstat(fd_map, &sb) < 0) { - perror(fname); - fail_file(); - } - if (!S_ISREG(sb.st_mode)) { - fprintf(stderr, "not a regular file: %s\n", fname); - fail_file(); - } - file_map = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_PRIVATE, - fd_map, 0); - mmap_failed = 0; - if (file_map == MAP_FAILED) { - mmap_failed = 1; - file_map = umalloc(sb.st_size); - uread(fd_map, file_map, sb.st_size); - } - close(fd_map); - - file_end = file_map + sb.st_size; - - return file_map; -} - -static void write_file(const char *fname) +static int write_file(const char *fname) { char tmp_file[strlen(fname) + 4]; size_t n; if (!file_updated) - return; + return 0; sprintf(tmp_file, "%s.rc", fname); @@ -325,25 +326,28 @@ static void write_file(const char *fname) fd_map = open(tmp_file, O_WRONLY | O_TRUNC | O_CREAT, sb.st_mode); if (fd_map < 0) { perror(fname); - fail_file(); + return -1; } n = write(fd_map, file_map, sb.st_size); if (n != sb.st_size) { perror("write"); - fail_file(); + close(fd_map); + return -1; } if (file_append_size) { n = write(fd_map, file_append, file_append_size); if (n != file_append_size) { perror("write"); - fail_file(); + close(fd_map); + return -1; } } close(fd_map); if (rename(tmp_file, fname) < 0) { perror(fname); - fail_file(); + return -1; } + return 0; } /* w8rev, w8nat, ...: Handle endianness. */ @@ -394,8 +398,7 @@ static uint32_t (*w)(uint32_t); static uint32_t (*w2)(uint16_t); /* Names of the sections that could contain calls to mcount. */ -static int -is_mcounted_section_name(char const *const txtname) +static int is_mcounted_section_name(char const *const txtname) { return strncmp(".text", txtname, 5) == 0 || strcmp(".init.text", txtname) == 0 || @@ -405,10 +408,11 @@ is_mcounted_section_name(char const *const txtname) strcmp(".irqentry.text", txtname) == 0 || strcmp(".softirqentry.text", txtname) == 0 || strcmp(".kprobes.text", txtname) == 0 || - strcmp(".cpuidle.text", txtname) == 0 || - strcmp(".text.unlikely", txtname) == 0; + strcmp(".cpuidle.text", txtname) == 0; } +static char const *already_has_rel_mcount = "success"; /* our work here is done! */ + /* 32 bit and 64 bit are very similar */ #include "recordmcount.h" #define RECORD_MCOUNT_64 @@ -447,11 +451,15 @@ static void MIPS64_r_info(Elf64_Rel *const rp, unsigned sym, unsigned type) }).r_info; } -static void -do_file(char const *const fname) +static int do_file(char const *const fname) { - Elf32_Ehdr *const ehdr = mmap_file(fname); unsigned int reltype = 0; + Elf32_Ehdr *ehdr; + int rc = -1; + + ehdr = mmap_file(fname); + if (!ehdr) + goto out; w = w4nat; w2 = w2nat; @@ -461,8 +469,7 @@ do_file(char const *const fname) default: fprintf(stderr, "unrecognized ELF data encoding %d: %s\n", ehdr->e_ident[EI_DATA], fname); - fail_file(); - break; + goto out; case ELFDATA2LSB: if (*(unsigned char const *)&endian != 1) { /* main() is big endian, file.o is little endian. 
*/ @@ -490,52 +497,54 @@ do_file(char const *const fname) push_bl_mcount_thumb = push_bl_mcount_thumb_be; break; } /* end switch */ - if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0 - || w2(ehdr->e_type) != ET_REL - || ehdr->e_ident[EI_VERSION] != EV_CURRENT) { + if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0 || + w2(ehdr->e_type) != ET_REL || + ehdr->e_ident[EI_VERSION] != EV_CURRENT) { fprintf(stderr, "unrecognized ET_REL file %s\n", fname); - fail_file(); + goto out; } - gpfx = 0; + gpfx = '_'; switch (w2(ehdr->e_machine)) { default: fprintf(stderr, "unrecognized e_machine %u %s\n", w2(ehdr->e_machine), fname); - fail_file(); - break; + goto out; case EM_386: reltype = R_386_32; rel_type_nop = R_386_NONE; make_nop = make_nop_x86; ideal_nop = ideal_nop5_x86_32; mcount_adjust_32 = -1; + gpfx = 0; + break; + case EM_ARM: + reltype = R_ARM_ABS32; + altmcount = "__gnu_mcount_nc"; + make_nop = make_nop_arm; + rel_type_nop = R_ARM_NONE; + gpfx = 0; break; - case EM_ARM: reltype = R_ARM_ABS32; - altmcount = "__gnu_mcount_nc"; - make_nop = make_nop_arm; - rel_type_nop = R_ARM_NONE; - break; case EM_AARCH64: - reltype = R_AARCH64_ABS64; - make_nop = make_nop_arm64; - rel_type_nop = R_AARCH64_NONE; - ideal_nop = ideal_nop4_arm64; - gpfx = '_'; - break; - case EM_IA_64: reltype = R_IA64_IMM64; gpfx = '_'; break; - case EM_MIPS: /* reltype: e_class */ gpfx = '_'; break; - case EM_PPC: reltype = R_PPC_ADDR32; gpfx = '_'; break; - case EM_PPC64: reltype = R_PPC64_ADDR64; gpfx = '_'; break; - case EM_S390: /* reltype: e_class */ gpfx = '_'; break; - case EM_SH: reltype = R_SH_DIR32; break; - case EM_SPARCV9: reltype = R_SPARC_64; gpfx = '_'; break; + reltype = R_AARCH64_ABS64; + make_nop = make_nop_arm64; + rel_type_nop = R_AARCH64_NONE; + ideal_nop = ideal_nop4_arm64; + break; + case EM_IA_64: reltype = R_IA64_IMM64; break; + case EM_MIPS: /* reltype: e_class */ break; + case EM_PPC: reltype = R_PPC_ADDR32; break; + case EM_PPC64: reltype = R_PPC64_ADDR64; break; + case EM_S390: /* reltype: e_class */ break; + case EM_SH: reltype = R_SH_DIR32; gpfx = 0; break; + case EM_SPARCV9: reltype = R_SPARC_64; break; case EM_X86_64: make_nop = make_nop_x86; ideal_nop = ideal_nop5_x86_64; reltype = R_X86_64_64; rel_type_nop = R_X86_64_NONE; mcount_adjust_64 = -1; + gpfx = 0; break; } /* end switch */ @@ -543,20 +552,20 @@ do_file(char const *const fname) default: fprintf(stderr, "unrecognized ELF class %d %s\n", ehdr->e_ident[EI_CLASS], fname); - fail_file(); - break; + goto out; case ELFCLASS32: if (w2(ehdr->e_ehsize) != sizeof(Elf32_Ehdr) || w2(ehdr->e_shentsize) != sizeof(Elf32_Shdr)) { fprintf(stderr, "unrecognized ET_REL file: %s\n", fname); - fail_file(); + goto out; } if (w2(ehdr->e_machine) == EM_MIPS) { reltype = R_MIPS_32; is_fake_mcount32 = MIPS32_is_fake_mcount; } - do32(ehdr, fname, reltype); + if (do32(ehdr, fname, reltype) < 0) + goto out; break; case ELFCLASS64: { Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr; @@ -564,7 +573,7 @@ do_file(char const *const fname) || w2(ghdr->e_shentsize) != sizeof(Elf64_Shdr)) { fprintf(stderr, "unrecognized ET_REL file: %s\n", fname); - fail_file(); + goto out; } if (w2(ghdr->e_machine) == EM_S390) { reltype = R_390_64; @@ -576,17 +585,20 @@ do_file(char const *const fname) Elf64_r_info = MIPS64_r_info; is_fake_mcount64 = MIPS64_is_fake_mcount; } - do64(ghdr, fname, reltype); + if (do64(ghdr, fname, reltype) < 0) + goto out; break; } } /* end switch */ - write_file(fname); - cleanup(); + rc = write_file(fname); +out: + file_append_cleanup(); + mmap_cleanup(); + 
return rc; } -int -main(int argc, char *argv[]) +int main(int argc, char *argv[]) { const char ftrace[] = "/ftrace.o"; int ftrace_size = sizeof(ftrace) - 1; @@ -613,7 +625,6 @@ main(int argc, char *argv[]) /* Process each file in turn, allowing deep failure. */ for (i = optind; i < argc; i++) { char *file = argv[i]; - int const sjval = setjmp(jmpenv); int len; /* @@ -626,28 +637,10 @@ main(int argc, char *argv[]) strcmp(file + (len - ftrace_size), ftrace) == 0) continue; - switch (sjval) { - default: - fprintf(stderr, "internal error: %s\n", file); - exit(1); - break; - case SJ_SETJMP: /* normal sequence */ - /* Avoid problems if early cleanup() */ - fd_map = -1; - mmap_failed = 1; - file_map = NULL; - file_ptr = NULL; - file_updated = 0; - do_file(file); - break; - case SJ_FAIL: /* error in do_file or below */ + if (do_file(file)) { fprintf(stderr, "%s: failed\n", file); ++n_error; - break; - case SJ_SUCCEED: /* premature success */ - /* do nothing */ - break; - } /* end switch */ + } } return !!n_error; } diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h index 47fca2c69a73e8..8f0a278ce0af2e 100644 --- a/scripts/recordmcount.h +++ b/scripts/recordmcount.h @@ -174,7 +174,7 @@ static int MIPS_is_fake_mcount(Elf_Rel const *rp) } /* Append the new shstrtab, Elf_Shdr[], __mcount_loc and its relocations. */ -static void append_func(Elf_Ehdr *const ehdr, +static int append_func(Elf_Ehdr *const ehdr, Elf_Shdr *const shstr, uint_t const *const mloc0, uint_t const *const mlocp, @@ -202,15 +202,20 @@ static void append_func(Elf_Ehdr *const ehdr, new_e_shoff = t; /* body for new shstrtab */ - ulseek(fd_map, sb.st_size, SEEK_SET); - uwrite(fd_map, old_shstr_sh_offset + (void *)ehdr, old_shstr_sh_size); - uwrite(fd_map, mc_name, 1 + strlen(mc_name)); + if (ulseek(sb.st_size, SEEK_SET) < 0) + return -1; + if (uwrite(old_shstr_sh_offset + (void *)ehdr, old_shstr_sh_size) < 0) + return -1; + if (uwrite(mc_name, 1 + strlen(mc_name)) < 0) + return -1; /* old(modified) Elf_Shdr table, word-byte aligned */ - ulseek(fd_map, t, SEEK_SET); + if (ulseek(t, SEEK_SET) < 0) + return -1; t += sizeof(Elf_Shdr) * old_shnum; - uwrite(fd_map, old_shoff + (void *)ehdr, - sizeof(Elf_Shdr) * old_shnum); + if (uwrite(old_shoff + (void *)ehdr, + sizeof(Elf_Shdr) * old_shnum) < 0) + return -1; /* new sections __mcount_loc and .rel__mcount_loc */ t += 2*sizeof(mcsec); @@ -225,7 +230,8 @@ static void append_func(Elf_Ehdr *const ehdr, mcsec.sh_info = 0; mcsec.sh_addralign = _w(_size); mcsec.sh_entsize = _w(_size); - uwrite(fd_map, &mcsec, sizeof(mcsec)); + if (uwrite(&mcsec, sizeof(mcsec)) < 0) + return -1; mcsec.sh_name = w(old_shstr_sh_size); mcsec.sh_type = (sizeof(Elf_Rela) == rel_entsize) @@ -239,15 +245,22 @@ static void append_func(Elf_Ehdr *const ehdr, mcsec.sh_info = w(old_shnum); mcsec.sh_addralign = _w(_size); mcsec.sh_entsize = _w(rel_entsize); - uwrite(fd_map, &mcsec, sizeof(mcsec)); - uwrite(fd_map, mloc0, (void *)mlocp - (void *)mloc0); - uwrite(fd_map, mrel0, (void *)mrelp - (void *)mrel0); + if (uwrite(&mcsec, sizeof(mcsec)) < 0) + return -1; + + if (uwrite(mloc0, (void *)mlocp - (void *)mloc0) < 0) + return -1; + if (uwrite(mrel0, (void *)mrelp - (void *)mrel0) < 0) + return -1; ehdr->e_shoff = _w(new_e_shoff); ehdr->e_shnum = w2(2 + w2(ehdr->e_shnum)); /* {.rel,}__mcount_loc */ - ulseek(fd_map, 0, SEEK_SET); - uwrite(fd_map, ehdr, sizeof(*ehdr)); + if (ulseek(0, SEEK_SET) < 0) + return -1; + if (uwrite(ehdr, sizeof(*ehdr)) < 0) + return -1; + return 0; } static unsigned get_mcountsym(Elf_Sym const 
*const sym0, @@ -351,9 +364,9 @@ static uint_t *sift_rel_mcount(uint_t *mlocp, * that are not going to be traced. The mcount calls here will be converted * into nops. */ -static void nop_mcount(Elf_Shdr const *const relhdr, - Elf_Ehdr const *const ehdr, - const char *const txtname) +static int nop_mcount(Elf_Shdr const *const relhdr, + Elf_Ehdr const *const ehdr, + const char *const txtname) { Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff) + (void *)ehdr); @@ -376,15 +389,18 @@ static void nop_mcount(Elf_Shdr const *const relhdr, mcountsym = get_mcountsym(sym0, relp, str0); if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) { - if (make_nop) + if (make_nop) { ret = make_nop((void *)ehdr, _w(shdr->sh_offset) + _w(relp->r_offset)); + if (ret < 0) + return -1; + } if (warn_on_notrace_sect && !once) { printf("Section %s has mcount callers being ignored\n", txtname); once = 1; /* just warn? */ if (!make_nop) - return; + return 0; } } @@ -396,14 +412,16 @@ static void nop_mcount(Elf_Shdr const *const relhdr, Elf_Rel rel; rel = *(Elf_Rel *)relp; Elf_r_info(&rel, Elf_r_sym(relp), rel_type_nop); - ulseek(fd_map, (void *)relp - (void *)ehdr, SEEK_SET); - uwrite(fd_map, &rel, sizeof(rel)); + if (ulseek((void *)relp - (void *)ehdr, SEEK_SET) < 0) + return -1; + if (uwrite(&rel, sizeof(rel)) < 0) + return -1; } relp = (Elf_Rel const *)(rel_entsize + (void *)relp); } + return 0; } - /* * Find a symbol in the given section, to be used as the base for relocating * the table of offsets of calls to mcount. A local or global symbol suffices, @@ -414,9 +432,10 @@ static void nop_mcount(Elf_Shdr const *const relhdr, * Num: Value Size Type Bind Vis Ndx Name * 2: 00000000 0 SECTION LOCAL DEFAULT 1 */ -static unsigned find_secsym_ndx(unsigned const txtndx, +static int find_secsym_ndx(unsigned const txtndx, char const *const txtname, uint_t *const recvalp, + unsigned int *sym_index, Elf_Shdr const *const symhdr, Elf_Ehdr const *const ehdr) { @@ -438,21 +457,20 @@ static unsigned find_secsym_ndx(unsigned const txtndx, continue; *recvalp = _w(symp->st_value); - return symp - sym0; + *sym_index = symp - sym0; + return 0; } } fprintf(stderr, "Cannot find symbol for section %u: %s.\n", txtndx, txtname); - fail_file(); + return -1; } - /* Evade ISO C restriction: no declaration after statement in has_rel_mcount. 
*/ -static char const * -__has_rel_mcount(Elf_Shdr const *const relhdr, /* is SHT_REL or SHT_RELA */ - Elf_Shdr const *const shdr0, - char const *const shstrtab, - char const *const fname) +static char const * __has_rel_mcount(Elf_Shdr const *const relhdr, /* reltype */ + Elf_Shdr const *const shdr0, + char const *const shstrtab, + char const *const fname) { /* .sh_info depends on .sh_type == SHT_REL[,A] */ Elf_Shdr const *const txthdr = &shdr0[w(relhdr->sh_info)]; @@ -461,7 +479,7 @@ __has_rel_mcount(Elf_Shdr const *const relhdr, /* is SHT_REL or SHT_RELA */ if (strcmp("__mcount_loc", txtname) == 0) { fprintf(stderr, "warning: __mcount_loc already exists: %s\n", fname); - succeed_file(); + return already_has_rel_mcount; } if (w(txthdr->sh_type) != SHT_PROGBITS || !(_w(txthdr->sh_flags) & SHF_EXECINSTR)) @@ -491,6 +509,10 @@ static unsigned tot_relsize(Elf_Shdr const *const shdr0, for (; nhdr; --nhdr, ++shdrp) { txtname = has_rel_mcount(shdrp, shdr0, shstrtab, fname); + if (txtname == already_has_rel_mcount) { + totrelsz = 0; + break; + } if (txtname && is_mcounted_section_name(txtname)) totrelsz += _w(shdrp->sh_size); } @@ -499,8 +521,8 @@ static unsigned tot_relsize(Elf_Shdr const *const shdr0, /* Overall supervision for Elf32 ET_REL file. */ -static void -do_func(Elf_Ehdr *const ehdr, char const *const fname, unsigned const reltype) +static int do_func(Elf_Ehdr *const ehdr, char const *const fname, + unsigned const reltype) { Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff) + (void *)ehdr); @@ -513,26 +535,54 @@ do_func(Elf_Ehdr *const ehdr, char const *const fname, unsigned const reltype) unsigned k; /* Upper bound on space: assume all relevant relocs are for mcount. */ - unsigned const totrelsz = tot_relsize(shdr0, nhdr, shstrtab, fname); - Elf_Rel *const mrel0 = umalloc(totrelsz); - Elf_Rel * mrelp = mrel0; + unsigned totrelsz; - /* 2*sizeof(address) <= sizeof(Elf_Rel) */ - uint_t *const mloc0 = umalloc(totrelsz>>1); - uint_t * mlocp = mloc0; + Elf_Rel * mrel0; + Elf_Rel * mrelp; + + uint_t * mloc0; + uint_t * mlocp; unsigned rel_entsize = 0; unsigned symsec_sh_link = 0; + int result = 0; + + totrelsz = tot_relsize(shdr0, nhdr, shstrtab, fname); + if (totrelsz == 0) + return 0; + mrel0 = umalloc(totrelsz); + mrelp = mrel0; + if (!mrel0) + return -1; + + /* 2*sizeof(address) <= sizeof(Elf_Rel) */ + mloc0 = umalloc(totrelsz>>1); + mlocp = mloc0; + if (!mloc0) { + free(mrel0); + return -1; + } + for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) { char const *const txtname = has_rel_mcount(relhdr, shdr0, shstrtab, fname); + if (txtname == already_has_rel_mcount) { + result = 0; + file_updated = 0; + goto out; /* Nothing to be done; don't append! */ + } if (txtname && is_mcounted_section_name(txtname)) { + unsigned int recsym; uint_t recval = 0; - unsigned const recsym = find_secsym_ndx( - w(relhdr->sh_info), txtname, &recval, - &shdr0[symsec_sh_link = w(relhdr->sh_link)], - ehdr); + + symsec_sh_link = w(relhdr->sh_link); + result = find_secsym_ndx(w(relhdr->sh_info), txtname, + &recval, &recsym, + &shdr0[symsec_sh_link], + ehdr); + if (result) + goto out; rel_entsize = _w(relhdr->sh_entsize); mlocp = sift_rel_mcount(mlocp, @@ -543,13 +593,17 @@ do_func(Elf_Ehdr *const ehdr, char const *const fname, unsigned const reltype) * This section is ignored by ftrace, but still * has mcount calls. Convert them to nops now. 
*/ - nop_mcount(relhdr, ehdr, txtname); + if (nop_mcount(relhdr, ehdr, txtname) < 0) { + result = -1; + goto out; + } } } - if (mloc0 != mlocp) { - append_func(ehdr, shstr, mloc0, mlocp, mrel0, mrelp, - rel_entsize, symsec_sh_link); - } + if (!result && mloc0 != mlocp) + result = append_func(ehdr, shstr, mloc0, mlocp, mrel0, mrelp, + rel_entsize, symsec_sh_link); +out: free(mrel0); free(mloc0); + return result; } diff --git a/tools/testing/selftests/arm64/Makefile b/tools/testing/selftests/arm64/Makefile index a61b2e743e998f..f9f79fb272f016 100644 --- a/tools/testing/selftests/arm64/Makefile +++ b/tools/testing/selftests/arm64/Makefile @@ -4,6 +4,7 @@ ARCH ?= $(shell uname -m 2>/dev/null || echo not) ifneq (,$(filter $(ARCH),aarch64 arm64)) +CFLAGS += -I../../../../usr/include/ TEST_GEN_PROGS := tags_test TEST_PROGS := run_tags_test.sh endif diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions index 1d96c5f7e402b7..86986c4bba549a 100644 --- a/tools/testing/selftests/ftrace/test.d/functions +++ b/tools/testing/selftests/ftrace/test.d/functions @@ -115,7 +115,7 @@ ftrace_errlog_check() { # err-prefix command-with-error-pos-by-^ command-file command=$(echo "$2" | tr -d ^) echo "Test command: $command" echo > error_log - (! echo "$command" > "$3" ) 2> /dev/null + (! echo "$command" >> "$3" ) 2> /dev/null grep "$1: error:" -A 3 error_log N=$(tail -n 1 error_log | wc -c) # " Command: " and "^\n" => 13 diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc index 3fb70e01b1fe18..3ff236719b6e87 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc @@ -24,7 +24,21 @@ test -d events/kprobes2/event2 || exit_failure :;: "Add an event on dot function without name" ;: -FUNC=`grep -m 10 " [tT] .*\.isra\..*$" /proc/kallsyms | tail -n 1 | cut -f 3 -d " "` +find_dot_func() { + if [ ! 
-f available_filter_functions ]; then + grep -m 10 " [tT] .*\.isra\..*$" /proc/kallsyms | tail -n 1 | cut -f 3 -d " " + return; + fi + + grep " [tT] .*\.isra\..*" /proc/kallsyms | cut -f 3 -d " " | while read f; do + if grep -s $f available_filter_functions; then + echo $f + break + fi + done +} + +FUNC=`find_dot_func | tail -n 1` [ "x" != "x$FUNC" ] || exit_unresolved echo "p $FUNC" > kprobe_events EVENT=`grep $FUNC kprobe_events | cut -f 1 -d " " | cut -f 2 -d:` diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_multiprobe.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_multiprobe.tc new file mode 100644 index 00000000000000..44494bac86d13f --- /dev/null +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_multiprobe.tc @@ -0,0 +1,35 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 +# description: Create/delete multiprobe on kprobe event + +[ -f kprobe_events ] || exit_unsupported + +grep -q "Create/append/" README || exit_unsupported + +# Choose 2 symbols for target +SYM1=_do_fork +SYM2=do_exit +EVENT_NAME=kprobes/testevent + +DEF1="p:$EVENT_NAME $SYM1" +DEF2="p:$EVENT_NAME $SYM2" + +:;: "Define an event which has 2 probes" ;: +echo $DEF1 >> kprobe_events +echo $DEF2 >> kprobe_events +cat kprobe_events | grep "$DEF1" +cat kprobe_events | grep "$DEF2" + +:;: "Remove the event by name (should remove both)" ;: +echo "-:$EVENT_NAME" >> kprobe_events +test `cat kprobe_events | wc -l` -eq 0 + +:;: "Remove just 1 event" ;: +echo $DEF1 >> kprobe_events +echo $DEF2 >> kprobe_events +echo "-:$EVENT_NAME $SYM1" >> kprobe_events +! cat kprobe_events | grep "$DEF1" +cat kprobe_events | grep "$DEF2" + +:;: "Appending different type must fail" ;: +! echo "$DEF1 \$stack" >> kprobe_events diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc index 29faaec942c69f..8a4025e912cb7f 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc @@ -41,6 +41,11 @@ check_error 'p vfs_read ^%none_reg' # BAD_REG_NAME check_error 'p vfs_read ^@12345678abcde' # BAD_MEM_ADDR check_error 'p vfs_read ^@+10' # FILE_ON_KPROBE +grep -q "imm-value" README && \ +check_error 'p vfs_read arg1=\^x' # BAD_IMM +grep -q "imm-string" README && \ +check_error 'p vfs_read arg1=\"abcd^' # IMMSTR_NO_CLOSE + check_error 'p vfs_read ^+0@0)' # DEREF_NEED_BRACE check_error 'p vfs_read ^+0ab1(@0)' # BAD_DEREF_OFFS check_error 'p vfs_read +0(+0(@0^)' # DEREF_OPEN_BRACE @@ -82,4 +87,15 @@ case $(uname -m) in ;; esac +# multiprobe errors +if grep -q "Create/append/" README && grep -q "imm-value" README; then +echo 'p:kprobes/testevent _do_fork' > kprobe_events +check_error '^r:kprobes/testevent do_exit' # DIFF_PROBE_TYPE +echo 'p:kprobes/testevent _do_fork abcd=\1' > kprobe_events +check_error 'p:kprobes/testevent _do_fork ^bcd=\1' # DIFF_ARG_TYPE +check_error 'p:kprobes/testevent _do_fork ^abcd=\1:u8' # DIFF_ARG_TYPE +check_error 'p:kprobes/testevent _do_fork ^abcd=\"foo"' # DIFF_ARG_TYPE +check_error '^p:kprobes/testevent _do_fork' # SAME_PROBE +fi + exit 0 diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile index b3ad909aefbceb..644770c3b75433 100644 --- a/tools/testing/selftests/powerpc/Makefile +++ b/tools/testing/selftests/powerpc/Makefile @@ -26,6 +26,7 @@ SUB_DIRS = alignment \ switch_endian \ syscalls \ tm \ + eeh \ vphn \ math \ ptrace \ diff --git 
a/tools/testing/selftests/powerpc/copyloops/.gitignore b/tools/testing/selftests/powerpc/copyloops/.gitignore index ce12cd0e2967ad..12ef5b031974de 100644 --- a/tools/testing/selftests/powerpc/copyloops/.gitignore +++ b/tools/testing/selftests/powerpc/copyloops/.gitignore @@ -1,13 +1,14 @@ copyuser_64_t0 copyuser_64_t1 copyuser_64_t2 -copyuser_power7_t0 -copyuser_power7_t1 +copyuser_p7_t0 +copyuser_p7_t1 memcpy_64_t0 memcpy_64_t1 memcpy_64_t2 -memcpy_power7_t0 -memcpy_power7_t1 +memcpy_p7_t0 +memcpy_p7_t1 copyuser_64_exc_t0 copyuser_64_exc_t1 copyuser_64_exc_t2 +memcpy_mcsafe_64 diff --git a/tools/testing/selftests/powerpc/copyloops/Makefile b/tools/testing/selftests/powerpc/copyloops/Makefile index 44574f3818b3d7..0917983a1c781d 100644 --- a/tools/testing/selftests/powerpc/copyloops/Makefile +++ b/tools/testing/selftests/powerpc/copyloops/Makefile @@ -12,7 +12,7 @@ ASFLAGS = $(CFLAGS) -Wa,-mpower4 TEST_GEN_PROGS := copyuser_64_t0 copyuser_64_t1 copyuser_64_t2 \ copyuser_p7_t0 copyuser_p7_t1 \ memcpy_64_t0 memcpy_64_t1 memcpy_64_t2 \ - memcpy_p7_t0 memcpy_p7_t1 \ + memcpy_p7_t0 memcpy_p7_t1 memcpy_mcsafe_64 \ copyuser_64_exc_t0 copyuser_64_exc_t1 copyuser_64_exc_t2 EXTRA_SOURCES := validate.c ../harness.c stubs.S @@ -45,6 +45,11 @@ $(OUTPUT)/memcpy_p7_t%: memcpy_power7.S $(EXTRA_SOURCES) -D SELFTEST_CASE=$(subst memcpy_p7_t,,$(notdir $@)) \ -o $@ $^ +$(OUTPUT)/memcpy_mcsafe_64: memcpy_mcsafe_64.S $(EXTRA_SOURCES) + $(CC) $(CPPFLAGS) $(CFLAGS) \ + -D COPY_LOOP=test_memcpy_mcsafe \ + -o $@ $^ + $(OUTPUT)/copyuser_64_exc_t%: copyuser_64.S exc_validate.c ../harness.c \ copy_tofrom_user_reference.S stubs.S $(CC) $(CPPFLAGS) $(CFLAGS) \ diff --git a/tools/testing/selftests/powerpc/copyloops/asm/export.h b/tools/testing/selftests/powerpc/copyloops/asm/export.h index 05c1663c89b037..e6b80d5fbd14b6 100644 --- a/tools/testing/selftests/powerpc/copyloops/asm/export.h +++ b/tools/testing/selftests/powerpc/copyloops/asm/export.h @@ -1,3 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ #define EXPORT_SYMBOL(x) +#define EXPORT_SYMBOL_GPL(x) #define EXPORT_SYMBOL_KASAN(x) diff --git a/tools/testing/selftests/powerpc/copyloops/memcpy_mcsafe_64.S b/tools/testing/selftests/powerpc/copyloops/memcpy_mcsafe_64.S new file mode 120000 index 00000000000000..f0feef3062f63d --- /dev/null +++ b/tools/testing/selftests/powerpc/copyloops/memcpy_mcsafe_64.S @@ -0,0 +1 @@ +../../../../../arch/powerpc/lib/memcpy_mcsafe_64.S \ No newline at end of file diff --git a/tools/testing/selftests/powerpc/eeh/Makefile b/tools/testing/selftests/powerpc/eeh/Makefile new file mode 100644 index 00000000000000..b397babd569b7a --- /dev/null +++ b/tools/testing/selftests/powerpc/eeh/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +noarg: + $(MAKE) -C ../ + +TEST_PROGS := eeh-basic.sh +TEST_FILES := eeh-functions.sh + +top_srcdir = ../../../../.. +include ../../lib.mk diff --git a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh new file mode 100755 index 00000000000000..f988d2f42e8f21 --- /dev/null +++ b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh @@ -0,0 +1,82 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0-only + +. ./eeh-functions.sh + +if ! eeh_supported ; then + echo "EEH not supported on this system, skipping" + exit 0; +fi + +if [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_check" ] && \ + [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_break" ] ; then + echo "debugfs EEH testing files are missing. Is debugfs mounted?" 
+ exit 1; +fi + +pre_lspci=`mktemp` +lspci > $pre_lspci + +# Bump the max freeze count to something absurd so we don't +# trip over it while breaking things. +echo 5000 > /sys/kernel/debug/powerpc/eeh_max_freezes + +# Record the devices that we break in here. Assuming everything +# goes to plan we should get them back once the recovery process +# is finished. +devices="" + +# Build up a list of candidate devices. +for dev in `ls -1 /sys/bus/pci/devices/ | grep '\.0$'` ; do + # skip bridges since we can't recover them (yet...) + if [ -e "/sys/bus/pci/devices/$dev/pci_bus" ] ; then + echo "$dev, Skipped: bridge" + continue; + fi + + # Skip VFs for now since we don't have a reliable way + # to break them. + if [ -e "/sys/bus/pci/devices/$dev/physfn" ] ; then + echo "$dev, Skipped: virtfn" + continue; + fi + + # Don't inject errors into an already-frozen PE. This happens with + # PEs that contain multiple PCI devices (e.g. multi-function cards) + # and injecting new errors during the recovery process will probably + # result in the recovery failing and the device being marked as + # failed. + if ! pe_ok $dev ; then + echo "$dev, Skipped: Bad initial PE state" + continue; + fi + + echo "$dev, Added" + + # Add to the list of devices to check + devices="$devices $dev" +done + +dev_count="$(echo $devices | wc -w)" +echo "Found ${dev_count} breakable devices..." + +failed=0 +for dev in $devices ; do + echo "Breaking $dev..." + + if ! pe_ok $dev ; then + echo "Skipping $dev, Initial PE state is not ok" + failed="$((failed + 1))" + continue; + fi + + if ! eeh_one_dev $dev ; then + failed="$((failed + 1))" + fi +done + +echo "$failed devices failed to recover ($dev_count tested)" +lspci | diff -u $pre_lspci - +rm -f $pre_lspci + +exit $failed diff --git a/tools/testing/selftests/powerpc/eeh/eeh-functions.sh b/tools/testing/selftests/powerpc/eeh/eeh-functions.sh new file mode 100755 index 00000000000000..26112ab5cdf425 --- /dev/null +++ b/tools/testing/selftests/powerpc/eeh/eeh-functions.sh @@ -0,0 +1,76 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0-only + +pe_ok() { + local dev="$1" + local path="/sys/bus/pci/devices/$dev/eeh_pe_state" + + if ! [ -e "$path" ] ; then + return 1; + fi + + local fw_state="$(cut -d' ' -f1 < $path)" + local sw_state="$(cut -d' ' -f2 < $path)" + + # If EEH_PE_ISOLATED or EEH_PE_RECOVERING are set then the PE is in an + # error state or being recovered. Either way, not ok. + if [ "$((sw_state & 0x3))" -ne 0 ] ; then + return 1 + fi + + # A functioning PE should have the EEH_STATE_MMIO_ACTIVE and + # EEH_STATE_DMA_ACTIVE flags set. For some goddamn stupid reason + # the platform backends set these when the PE is in reset. The + # RECOVERING check above should stop any false positives though. + if [ "$((fw_state & 0x18))" -ne "$((0x18))" ] ; then + return 1 + fi + + return 0; +} + +eeh_supported() { + test -e /proc/powerpc/eeh && \ + grep -q 'EEH Subsystem is enabled' /proc/powerpc/eeh +} + +eeh_one_dev() { + local dev="$1" + + # Using this function from the command line is sometimes useful for + # testing so check that the argument is a well-formed sysfs device + # name. + if ! test -e /sys/bus/pci/devices/$dev/ ; then + echo "Error: '$dev' must be a sysfs device name (DDDD:BB:DD.F)" + return 1; + fi + + # Break it + echo $dev >/sys/kernel/debug/powerpc/eeh_dev_break + + # Force an EEH device check. If the kernel has already + # noticed the EEH (due to a driver poll or whatever), this + # is a no-op.
+ echo $dev >/sys/kernel/debug/powerpc/eeh_dev_check + + # Enforce a 30s timeout for recovery. Even the IPR, which is infamously + # slow to reset, should recover within 30s. + max_wait=30 + + for i in `seq 0 ${max_wait}` ; do + if pe_ok $dev ; then + break; + fi + echo "$dev, waited $i/${max_wait}" + sleep 1 + done + + if ! pe_ok $dev ; then + echo "$dev, Failed to recover!" + return 1; + fi + + echo "$dev, Recovered after $i seconds" + return 0; +} + diff --git a/tools/testing/selftests/powerpc/ptrace/.gitignore b/tools/testing/selftests/powerpc/ptrace/.gitignore index 07ec449a276752..dce19f221c461f 100644 --- a/tools/testing/selftests/powerpc/ptrace/.gitignore +++ b/tools/testing/selftests/powerpc/ptrace/.gitignore @@ -10,3 +10,6 @@ ptrace-tm-spd-vsx ptrace-tm-spr ptrace-hwbreak perf-hwbreak +core-pkey +ptrace-pkey +ptrace-syscall diff --git a/tools/testing/selftests/powerpc/security/.gitignore b/tools/testing/selftests/powerpc/security/.gitignore new file mode 100644 index 00000000000000..0b969fba3beb2c --- /dev/null +++ b/tools/testing/selftests/powerpc/security/.gitignore @@ -0,0 +1 @@ +rfi_flush diff --git a/tools/testing/selftests/powerpc/stringloops/.gitignore b/tools/testing/selftests/powerpc/stringloops/.gitignore index 0b43da74ee46ac..31a17e0ba88479 100644 --- a/tools/testing/selftests/powerpc/stringloops/.gitignore +++ b/tools/testing/selftests/powerpc/stringloops/.gitignore @@ -1 +1,4 @@ -memcmp +memcmp_64 +memcmp_32 +strlen +strlen_32 diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-fpu.c b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-fpu.c index d57c2d2ab6ec4d..254f912ad611e4 100644 --- a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-fpu.c +++ b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-fpu.c @@ -5,10 +5,11 @@ * Test the kernel's signal frame code. * * The kernel sets up two sets of ucontexts if the signal was to be - * delivered while the thread was in a transaction. + * delivered while the thread was in a transaction (referred to as + * first and second contexts). * Expected behaviour is that the checkpointed state is in the user - * context passed to the signal handler. The speculated state can be - * accessed with the uc_link pointer. + * context passed to the signal handler (first context). The speculated + * state can be accessed with the uc_link pointer (second context). * * The rationale for this is that if TM unaware code (which linked * against TM libs) installs a signal handler it will not know of the @@ -28,17 +29,20 @@ #define MAX_ATTEMPT 500000 -#define NV_FPU_REGS 18 +#define NV_FPU_REGS 18 /* Number of non-volatile FP registers */ +#define FPR14 14 /* First non-volatile FP register to check in f14-31 subset */ long tm_signal_self_context_load(pid_t pid, long *gprs, double *fps, vector int *vms, vector int *vss); -/* Be sure there are 2x as many as there are NV FPU regs (2x18) */ +/* Test only non-volatile registers, i.e. 18 fpr registers from f14 to f31 */ static double fps[] = { + /* First context will be set with these values, i.e. non-speculative */ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + /* Second context will be set with these values, i.e.
speculative */ -1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18 }; -static sig_atomic_t fail; +static sig_atomic_t fail, broken; static void signal_usr1(int signum, siginfo_t *info, void *uc) { @@ -46,11 +50,24 @@ static void signal_usr1(int signum, siginfo_t *info, void *uc) ucontext_t *ucp = uc; ucontext_t *tm_ucp = ucp->uc_link; - for (i = 0; i < NV_FPU_REGS && !fail; i++) { - fail = (ucp->uc_mcontext.fp_regs[i + 14] != fps[i]); - fail |= (tm_ucp->uc_mcontext.fp_regs[i + 14] != fps[i + NV_FPU_REGS]); - if (fail) - printf("Failed on %d FP %g or %g\n", i, ucp->uc_mcontext.fp_regs[i + 14], tm_ucp->uc_mcontext.fp_regs[i + 14]); + for (i = 0; i < NV_FPU_REGS; i++) { + /* Check first context. Print all mismatches. */ + fail = (ucp->uc_mcontext.fp_regs[FPR14 + i] != fps[i]); + if (fail) { + broken = 1; + printf("FPR%d (1st context) == %g instead of %g (expected)\n", + FPR14 + i, ucp->uc_mcontext.fp_regs[FPR14 + i], fps[i]); + } + } + + for (i = 0; i < NV_FPU_REGS; i++) { + /* Check second context. Print all mismatches. */ + fail = (tm_ucp->uc_mcontext.fp_regs[FPR14 + i] != fps[NV_FPU_REGS + i]); + if (fail) { + broken = 1; + printf("FPR%d (2nd context) == %g instead of %g (expected)\n", + FPR14 + i, tm_ucp->uc_mcontext.fp_regs[FPR14 + i], fps[NV_FPU_REGS + i]); + } } } @@ -72,13 +89,19 @@ static int tm_signal_context_chk_fpu() } i = 0; - while (i < MAX_ATTEMPT && !fail) { + while (i < MAX_ATTEMPT && !broken) { + /* + * tm_signal_self_context_load will set both first and second + * contexts according to the values passed through non-NULL + * array pointers to it, in this case 'fps', and invoke the + * signal handler installed for SIGUSR1. + */ rc = tm_signal_self_context_load(pid, NULL, fps, NULL, NULL); FAIL_IF(rc != pid); i++; } - return fail; + return (broken); } int main(void) diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-gpr.c b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-gpr.c index 4d05f8b0254ccb..0cc680f61828bf 100644 --- a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-gpr.c +++ b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-gpr.c @@ -5,10 +5,11 @@ * Test the kernel's signal frame code. * * The kernel sets up two sets of ucontexts if the signal was to be - * delivered while the thread was in a transaction. + * delivered while the thread was in a transaction (referred to as + * first and second contexts). * Expected behaviour is that the checkpointed state is in the user - * context passed to the signal handler. The speculated state can be - * accessed with the uc_link pointer. + * context passed to the signal handler (first context). The speculated + * state can be accessed with the uc_link pointer (second context). * * The rationale for this is that if TM unaware code (which linked * against TM libs) installs a signal handler it will not know of the @@ -28,14 +29,22 @@ #define MAX_ATTEMPT 500000 -#define NV_GPR_REGS 18 +#define NV_GPR_REGS 18 /* Number of non-volatile GPR registers */ +#define R14 14 /* First non-volatile register to check in r14-r31 subset */ long tm_signal_self_context_load(pid_t pid, long *gprs, double *fps, vector int *vms, vector int *vss); -static sig_atomic_t fail; +static sig_atomic_t fail, broken; -static long gps[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, - -1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18}; +/* Test only non-volatile general purpose registers, i.e.
r14-r31 */ +static long gprs[] = { + /* First context will be set with these values, i.e. non-speculative */ + /* R14, R15, ... */ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + /* Second context will be set with these values, i.e. speculative */ + /* R14, R15, ... */ + -1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18 +}; static void signal_usr1(int signum, siginfo_t *info, void *uc) { @@ -43,12 +52,24 @@ static void signal_usr1(int signum, siginfo_t *info, void *uc) ucontext_t *ucp = uc; ucontext_t *tm_ucp = ucp->uc_link; - for (i = 0; i < NV_GPR_REGS && !fail; i++) { - fail = (ucp->uc_mcontext.gp_regs[i + 14] != gps[i]); - fail |= (tm_ucp->uc_mcontext.gp_regs[i + 14] != gps[i + NV_GPR_REGS]); - if (fail) - printf("Failed on %d GPR %lu or %lu\n", i, - ucp->uc_mcontext.gp_regs[i + 14], tm_ucp->uc_mcontext.gp_regs[i + 14]); + /* Check first context. Print all mismatches. */ + for (i = 0; i < NV_GPR_REGS; i++) { + fail = (ucp->uc_mcontext.gp_regs[R14 + i] != gprs[i]); + if (fail) { + broken = 1; + printf("GPR%d (1st context) == %lu instead of %lu (expected)\n", + R14 + i, ucp->uc_mcontext.gp_regs[R14 + i], gprs[i]); + } + } + + /* Check second context. Print all mismatches. */ + for (i = 0; i < NV_GPR_REGS; i++) { + fail = (tm_ucp->uc_mcontext.gp_regs[R14 + i] != gprs[NV_GPR_REGS + i]); + if (fail) { + broken = 1; + printf("GPR%d (2nd context) == %lu instead of %lu (expected)\n", + R14 + i, tm_ucp->uc_mcontext.gp_regs[R14 + i], gprs[NV_GPR_REGS + i]); + } } } @@ -70,13 +91,19 @@ static int tm_signal_context_chk_gpr() } i = 0; - while (i < MAX_ATTEMPT && !fail) { - rc = tm_signal_self_context_load(pid, gps, NULL, NULL, NULL); + while (i < MAX_ATTEMPT && !broken) { + /* + * tm_signal_self_context_load will set both first and second + * contexts according to the values passed through non-NULL + * array pointers to it, in this case 'gprs', and invoke the + * signal handler installed for SIGUSR1. + */ + rc = tm_signal_self_context_load(pid, gprs, NULL, NULL, NULL); FAIL_IF(rc != pid); i++; } - return fail; + return broken; } int main(void) diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vmx.c b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vmx.c index 48ad01499b1ae2..b6d52730a0d832 100644 --- a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vmx.c +++ b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vmx.c @@ -5,10 +5,11 @@ * Test the kernel's signal frame code. * * The kernel sets up two sets of ucontexts if the signal was to be - * delivered while the thread was in a transaction. + * delivered while the thread was in a transaction (referred to as + * first and second contexts). * Expected behaviour is that the checkpointed state is in the user - * context passed to the signal handler. The speculated state can be - * accessed with the uc_link pointer. + * context passed to the signal handler (first context). The speculated + * state can be accessed with the uc_link pointer (second context).
* * The rationale for this is that if TM unaware code (which linked * against TM libs) installs a signal handler it will not know of the @@ -29,18 +30,24 @@ #define MAX_ATTEMPT 500000 -#define NV_VMX_REGS 12 +#define NV_VMX_REGS 12 /* Number of non-volatile VMX registers */ +#define VMX20 20 /* First non-volatile register to check in vr20-31 subset */ long tm_signal_self_context_load(pid_t pid, long *gprs, double *fps, vector int *vms, vector int *vss); -static sig_atomic_t fail; +static sig_atomic_t fail, broken; +/* Test only non-volatile registers, i.e. 12 vmx registers from vr20 to vr31 */ vector int vms[] = { - {1, 2, 3, 4 },{5, 6, 7, 8 },{9, 10,11,12}, + /* First context will be set with these values, i.e. non-speculative */ + /* VMX20 , VMX21 , ... */ + { 1, 2, 3, 4},{ 5, 6, 7, 8},{ 9,10,11,12}, {13,14,15,16},{17,18,19,20},{21,22,23,24}, {25,26,27,28},{29,30,31,32},{33,34,35,36}, {37,38,39,40},{41,42,43,44},{45,46,47,48}, - {-1, -2, -3, -4}, {-5, -6, -7, -8}, {-9, -10,-11,-12}, + /* Second context will be set with these values, i.e. speculative */ + /* VMX20 , VMX21 , ... */ + { -1, -2, -3, -4},{ -5, -6, -7, -8},{ -9,-10,-11,-12}, {-13,-14,-15,-16},{-17,-18,-19,-20},{-21,-22,-23,-24}, {-25,-26,-27,-28},{-29,-30,-31,-32},{-33,-34,-35,-36}, {-37,-38,-39,-40},{-41,-42,-43,-44},{-45,-46,-47,-48} @@ -48,26 +55,43 @@ vector int vms[] = { static void signal_usr1(int signum, siginfo_t *info, void *uc) { - int i; + int i, j; ucontext_t *ucp = uc; ucontext_t *tm_ucp = ucp->uc_link; - for (i = 0; i < NV_VMX_REGS && !fail; i++) { - fail = memcmp(ucp->uc_mcontext.v_regs->vrregs[i + 20], + for (i = 0; i < NV_VMX_REGS; i++) { + /* Check first context. Print all mismatches. */ + fail = memcmp(ucp->uc_mcontext.v_regs->vrregs[VMX20 + i], &vms[i], sizeof(vector int)); - fail |= memcmp(tm_ucp->uc_mcontext.v_regs->vrregs[i + 20], - &vms[i + NV_VMX_REGS], sizeof (vector int)); - if (fail) { - int j; + if (fail) { + broken = 1; + printf("VMX%d (1st context) == 0x", VMX20 + i); + /* Print actual value in first context. */ + for (j = 0; j < 4; j++) + printf("%08x", ucp->uc_mcontext.v_regs->vrregs[VMX20 + i][j]); + printf(" instead of 0x"); + /* Print expected value. */ + for (j = 0; j < 4; j++) + printf("%08x", vms[i][j]); + printf(" (expected)\n"); + } + } - fprintf(stderr, "Failed on %d vmx 0x", i); + for (i = 0; i < NV_VMX_REGS; i++) { + /* Check second context. Print all mismatches. */ + fail = memcmp(tm_ucp->uc_mcontext.v_regs->vrregs[VMX20 + i], + &vms[NV_VMX_REGS + i], sizeof (vector int)); + if (fail) { + broken = 1; + printf("VMX%d (2nd context) == 0x", VMX20 + i); + /* Print actual value in second context. */ + for (j = 0; j < 4; j++) + printf("%08x", tm_ucp->uc_mcontext.v_regs->vrregs[VMX20 + i][j]); + printf(" instead of 0x"); + /* Print expected value. */ for (j = 0; j < 4; j++) - fprintf(stderr, "%04x", ucp->uc_mcontext.v_regs->vrregs[i + 20][j]); - fprintf(stderr, " vs 0x"); - for (j = 0 ; j < 4; j++) - fprintf(stderr, "%04x", tm_ucp->uc_mcontext.v_regs->vrregs[i + 20][j]); - fprintf(stderr, "\n"); + printf("%08x", vms[NV_VMX_REGS + i][j]); + printf(" (expected)\n"); } } } @@ -90,13 +114,19 @@ static int tm_signal_context_chk() } i = 0; - while (i < MAX_ATTEMPT && !fail) { + while (i < MAX_ATTEMPT && !broken) { + /* + * tm_signal_self_context_load will set both first and second + * contexts according to the values passed through non-NULL + * array pointers to it, in this case 'vms', and invoke the + * signal handler installed for SIGUSR1.
+ */ + rc = tm_signal_self_context_load(pid, NULL, NULL, vms, NULL); FAIL_IF(rc != pid); i++; } - return fail; + return (broken); } int main(void) diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vsx.c b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vsx.c index 8c8677a408bb9e..8e25e2072ecda8 100644 --- a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vsx.c +++ b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vsx.c @@ -5,10 +5,11 @@ * Test the kernel's signal frame code. * * The kernel sets up two sets of ucontexts if the signal was to be - * delivered while the thread was in a transaction. + * delivered while the thread was in a transaction (referred to as + * first and second contexts). * Expected behaviour is that the checkpointed state is in the user - * context passed to the signal handler. The speculated state can be - * accessed with the uc_link pointer. + * context passed to the signal handler (first context). The speculated + * state can be accessed with the uc_link pointer (second context). * * The rationale for this is that if TM unaware code (which linked * against TM libs) installs a signal handler it will not know of the @@ -29,17 +30,24 @@ #define MAX_ATTEMPT 500000 -#define NV_VSX_REGS 12 +#define NV_VSX_REGS 12 /* Number of VSX registers to check. */ +#define VSX20 20 /* First VSX register to check in vsr20-vsr31 subset */ +#define FPR20 20 /* FPR20 overlaps VSX20 most significant doubleword */ long tm_signal_self_context_load(pid_t pid, long *gprs, double *fps, vector int *vms, vector int *vss); -static sig_atomic_t fail; +static sig_atomic_t fail, broken; -vector int vss[] = { - {1, 2, 3, 4 },{5, 6, 7, 8 },{9, 10,11,12}, +/* Test only 12 vsx registers from vsr20 to vsr31 */ +vector int vsxs[] = { + /* First context will be set with these values, i.e. non-speculative */ + /* VSX20 , VSX21 , ... */ + { 1, 2, 3, 4},{ 5, 6, 7, 8},{ 9,10,11,12}, {13,14,15,16},{17,18,19,20},{21,22,23,24}, {25,26,27,28},{29,30,31,32},{33,34,35,36}, {37,38,39,40},{41,42,43,44},{45,46,47,48}, + /* Second context will be set with these values, i.e. speculative */ + /* VSX20 , VSX21 , ... */ {-1, -2, -3, -4 },{-5, -6, -7, -8 },{-9, -10,-11,-12}, {-13,-14,-15,-16},{-17,-18,-19,-20},{-21,-22,-23,-24}, {-25,-26,-27,-28},{-29,-30,-31,-32},{-33,-34,-35,-36}, @@ -48,41 +56,91 @@ vector int vss[] = { static void signal_usr1(int signum, siginfo_t *info, void *uc) { - int i; - uint8_t vsc[sizeof(vector int)]; - uint8_t vst[sizeof(vector int)]; + int i, j; + uint8_t vsx[sizeof(vector int)]; + uint8_t vsx_tm[sizeof(vector int)]; ucontext_t *ucp = uc; ucontext_t *tm_ucp = ucp->uc_link; /* - * The other half of the VSX regs will be after v_regs. + * FP registers and VMX registers overlap the VSX registers. + * + * FP registers (f0-31) overlap the most significant 64 bits of VSX + * registers vsr0-31, whilst VMX registers vr0-31, being 128-bit like + * the VSX registers, overlap fully the other half of VSX registers, + * i.e. vr0-31 overlaps fully vsr32-63. + * + * Due to compatibility and historical reasons (VMX/Altivec support + * appeared first on the architecture), VMX registers vr0-31 (so VSX + * half vsr32-63 too) are stored right after the v_regs pointer, in an + * area allocated for 'vmx_reserve' array (please see + * arch/powerpc/include/uapi/asm/sigcontext.h for details about the + * mcontext_t structure on Power). + * + * The other VSX half (vsr0-31) is hence stored below vr0-31/vsr32-63 + * registers, but only the least significant 64 bits of vsr0-31.
The + * most significant 64 bits of vsr0-31 (f0-31), as they overlap the FP + * registers, are kept in fp_regs. + * + * v_regs is a 16 byte aligned pointer at the start of vmx_reserve + * (vmx_reserve may or may not be 16 aligned) where the v_regs structure + * exists, so v_regs points to where vr0-31 / vsr32-63 registers are + * fully stored. Since v_regs type is elf_vrregset_t, v_regs + 1 + * skips all the slots used to store vr0-31 / vsr32-63 and points to + * part of one VSX half, i.e. v_regs + 1 points to the least significant + * 64 bits of vsr0-31. The other part of this half (the most significant + * part of vsr0-31) is stored in fp_regs. * - * In short, vmx_reserve array holds everything. v_regs is a 16 - * byte aligned pointer at the start of vmx_reserve (vmx_reserve - * may or may not be 16 aligned) where the v_regs structure exists. - * (half of) The VSX regsters are directly after v_regs so the - * easiest way to find them below. */ + /* Get pointer to least significant doubleword of vsr0-31 */ long *vsx_ptr = (long *)(ucp->uc_mcontext.v_regs + 1); long *tm_vsx_ptr = (long *)(tm_ucp->uc_mcontext.v_regs + 1); - for (i = 0; i < NV_VSX_REGS && !fail; i++) { - memcpy(vsc, &ucp->uc_mcontext.fp_regs[i + 20], 8); - memcpy(vsc + 8, &vsx_ptr[20 + i], 8); - fail = memcmp(vsc, &vss[i], sizeof(vector int)); - memcpy(vst, &tm_ucp->uc_mcontext.fp_regs[i + 20], 8); - memcpy(vst + 8, &tm_vsx_ptr[20 + i], 8); - fail |= memcmp(vst, &vss[i + NV_VSX_REGS], sizeof(vector int)); - if (fail) { - int j; + /* Check first context. Print all mismatches. */ + for (i = 0; i < NV_VSX_REGS; i++) { + /* + * Copy VSX most significant doubleword from fp_regs and + * copy VSX least significant one from 64-bit slots below + * saved VMX registers. + */ + memcpy(vsx, &ucp->uc_mcontext.fp_regs[FPR20 + i], 8); + memcpy(vsx + 8, &vsx_ptr[VSX20 + i], 8); + + fail = memcmp(vsx, &vsxs[i], sizeof(vector int)); - fprintf(stderr, "Failed on %d vsx 0x", i); + if (fail) { + broken = 1; + printf("VSX%d (1st context) == 0x", VSX20 + i); for (j = 0; j < 16; j++) - fprintf(stderr, "%02x", vsc[j]); - fprintf(stderr, " vs 0x"); + printf("%02x", vsx[j]); + printf(" instead of 0x"); + for (j = 0; j < 4; j++) + printf("%08x", vsxs[i][j]); + printf(" (expected)\n"); + } + } + + /* Check second context. Print all mismatches. */ + for (i = 0; i < NV_VSX_REGS; i++) { + /* + * Copy VSX most significant doubleword from fp_regs and + * copy VSX least significant one from 64-bit slots below + * saved VMX registers. + */ + memcpy(vsx_tm, &tm_ucp->uc_mcontext.fp_regs[FPR20 + i], 8); + memcpy(vsx_tm + 8, &tm_vsx_ptr[VSX20 + i], 8); + + fail = memcmp(vsx_tm, &vsxs[NV_VSX_REGS + i], sizeof(vector int)); + + if (fail) { + broken = 1; + printf("VSX%d (2nd context) == 0x", VSX20 + i); for (j = 0; j < 16; j++) - fprintf(stderr, "%02x", vst[j]); - fprintf(stderr, "\n"); + printf("%02x", vsx_tm[j]); + printf(" instead of 0x"); + for (j = 0; j < 4; j++) + printf("%08x", vsxs[NV_VSX_REGS + i][j]); + printf(" (expected)\n"); } } } @@ -105,13 +163,19 @@ static int tm_signal_context_chk() } i = 0; - while (i < MAX_ATTEMPT && !fail) { - rc = tm_signal_self_context_load(pid, NULL, NULL, NULL, vss); + while (i < MAX_ATTEMPT && !broken) { + /* + * tm_signal_self_context_load will set both first and second + * contexts according to the values passed through non-NULL + * array pointers to it, in this case 'vsxs', and invoke the + * signal handler installed for SIGUSR1.
+ */ + rc = tm_signal_self_context_load(pid, NULL, NULL, NULL, vsxs); FAIL_IF(rc != pid); i++; } - return fail; + return (broken); } int main(void) diff --git a/tools/testing/selftests/powerpc/tm/tm.h b/tools/testing/selftests/powerpc/tm/tm.h index 97f9f491c541a5..c402464b038fc8 100644 --- a/tools/testing/selftests/powerpc/tm/tm.h +++ b/tools/testing/selftests/powerpc/tm/tm.h @@ -55,7 +55,8 @@ static inline bool failure_is_unavailable(void) static inline bool failure_is_reschedule(void) { if ((failure_code() & TM_CAUSE_RESCHED) == TM_CAUSE_RESCHED || - (failure_code() & TM_CAUSE_KVM_RESCHED) == TM_CAUSE_KVM_RESCHED) + (failure_code() & TM_CAUSE_KVM_RESCHED) == TM_CAUSE_KVM_RESCHED || + (failure_code() & TM_CAUSE_KVM_FAC_UNAV) == TM_CAUSE_KVM_FAC_UNAV) return true; return false; diff --git a/usr/include/Makefile b/usr/include/Makefile index 1fb6abe29b2fdb..05c71ef42f51af 100644 --- a/usr/include/Makefile +++ b/usr/include/Makefile @@ -115,6 +115,4 @@ header-test-y += $(filter-out $(header-test-), \ $(patsubst $(obj)/%,%, $(wildcard \ $(addprefix $(obj)/, *.h */*.h */*/*.h */*/*/*.h)))) -# For GNU Make <= 4.2.1, $(wildcard $(obj)/*/) matches to not only directories -# but also regular files. Use $(filter %/, ...) just in case. -clean-dirs += $(patsubst $(obj)/%/,%,$(filter %/, $(wildcard $(obj)/*/))) +clean-files += $(filter-out Makefile, $(notdir $(wildcard $(obj)/*)))
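For reference, a minimal way to exercise the new EEH selftest added above is to run the script in place rather than through the kselftest harness. This is only a usage sketch, not part of the patch: it assumes a POWER host with EEH support, debugfs mounted, and root privileges, and the PCI address shown is a hypothetical placeholder.

    # Run the basic EEH break/recover test directly (sketch, assumes an EEH-capable host).
    cd tools/testing/selftests/powerpc/eeh
    sudo ./eeh-basic.sh

    # The helpers can also be sourced interactively to check a single PE;
    # 0000:01:00.0 is a placeholder device address.
    . ./eeh-functions.sh
    pe_ok 0000:01:00.0 && echo "PE is healthy"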