!Edrivers/hsi/hsi.c
</chapter>
+ <chapter id="pwm">
+ <title>Pulse-Width Modulation (PWM)</title>
+ <para>
+ Pulse-width modulation is a modulation technique primarily used to
+ control power supplied to electrical devices.
+ </para>
+ <para>
+ The PWM framework provides an abstraction for providers and consumers
+ of PWM signals. A controller that provides one or more PWM signals is
+ registered as <structname>struct pwm_chip</structname>. Providers are
+ expected to embed this structure in a driver-specific structure. The
+ <structname>pwm_chip</structname> structure contains the fields that
+ describe a particular chip.
+ </para>
+ <para>
+ A chip exposes one or more PWM signal sources, each of which is
+ exposed as a <structname>struct pwm_device</structname>. Operations can be
+ performed on PWM devices to control the period, duty cycle, polarity
+ and active state of the signal.
+ </para>
+ <para>
+ Note that PWM devices are exclusive resources: they can only be
+ used by one consumer at a time.
+ </para>
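+ <para>
+ As a minimal illustrative sketch (error handling elided; duty_ns and
+ period_ns stand for placeholder values in nanoseconds), a consumer
+ might drive a PWM device like this:
+ </para>
+ <programlisting>
+struct pwm_device *pwm = pwm_get(dev, NULL); /* claim the exclusive resource */
+
+pwm_config(pwm, duty_ns, period_ns);         /* program duty cycle and period */
+pwm_enable(pwm);                             /* start outputting the signal */
+/* ... */
+pwm_disable(pwm);                            /* stop the signal */
+pwm_put(pwm);                                /* release the device */
+ </programlisting>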
+!Iinclude/linux/pwm.h
+!Edrivers/pwm/core.c
+ </chapter>
+
</book>
--- /dev/null
+* NXP LPC18xx State Configurable Timer - Pulse Width Modulator driver
+
+Required properties:
+ - compatible: Should be "nxp,lpc1850-sct-pwm"
+ - reg: Should contain physical base address and length of pwm registers.
+ - clocks: Must contain an entry for each entry in clock-names.
+ See ../clock/clock-bindings.txt for details.
+ - clock-names: Must include the following entries.
+ - pwm: PWM operating clock.
+ - #pwm-cells: Should be 3. See pwm.txt in this directory for the description
+ of the cells format.
+
+Example:
+ pwm: pwm@40000000 {
+ compatible = "nxp,lpc1850-sct-pwm";
+ reg = <0x40000000 0x1000>;
+		clocks = <&ccu1 CLK_CPU_SCT>;
+ clock-names = "pwm";
+ #pwm-cells = <3>;
+ };
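+
+A hypothetical consumer node (not part of this binding document) would
+reference the controller with three cells - channel index, period in
+nanoseconds and flags from <dt-bindings/pwm/pwm.h>:
+
+  backlight {
+    pwms = <&pwm 0 10000000 PWM_POLARITY_INVERTED>;
+  };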
Definition: the identifier of the remote processor in the smd channel
allocation table
+- qcom,remote-pid:
+ Usage: optional
+ Value type: <u32>
+ Definition: the identifier for the remote processor as known by the rest
+ of the system.
+
= SMD DEVICES
In turn, subnodes of the "edges" represent devices tied to SMD channels on that
| ia64: | TODO |
| m32r: | .. |
| m68k: | .. |
- | metag: | .. |
+ | metag: | TODO |
| microblaze: | .. |
| mips: | ok |
| mn10300: | .. |
4.87 KVM_SET_GUEST_DEBUG
Capability: KVM_CAP_SET_GUEST_DEBUG
-Architectures: x86, s390, ppc
+Architectures: x86, s390, ppc, arm64
Type: vcpu ioctl
Parameters: struct kvm_guest_debug (in)
Returns: 0 on success; -1 on error
The top 16 bits of the control field are architecture specific control
flags which can include the following:
- - KVM_GUESTDBG_USE_SW_BP: using software breakpoints [x86]
- - KVM_GUESTDBG_USE_HW_BP: using hardware breakpoints [x86, s390]
+ - KVM_GUESTDBG_USE_SW_BP: using software breakpoints [x86, arm64]
+ - KVM_GUESTDBG_USE_HW_BP: using hardware breakpoints [x86, s390, arm64]
- KVM_GUESTDBG_INJECT_DB: inject DB type exception [x86]
- KVM_GUESTDBG_INJECT_BP: inject BP type exception [x86]
- KVM_GUESTDBG_EXIT_PENDING: trigger an immediate guest exit [s390]
The second part of the structure is architecture specific and
typically contains a set of debug registers.
+For arm64 the number of debug registers is implementation defined and
+can be determined by querying the KVM_CAP_GUEST_DEBUG_HW_BPS and
+KVM_CAP_GUEST_DEBUG_HW_WPS capabilities which return a positive number
+indicating the number of supported registers.
+
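+A userspace sketch of querying these capabilities (kvm_fd, the /dev/kvm
+file descriptor, and the variable names are illustrative only):
+
+    int bps = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_GUEST_DEBUG_HW_BPS);
+    int wps = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_GUEST_DEBUG_HW_WPS);
+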
When debug events occur, the main run loop exits with the reason
KVM_EXIT_DEBUG, with the kvm_debug_exit_arch part of the kvm_run
structure containing architecture specific debug information.
where kvm expects application code to place the data for the next
KVM_RUN invocation (KVM_EXIT_IO_IN). Data format is a packed array.
+ /* KVM_EXIT_DEBUG */
struct {
struct kvm_debug_exit_arch arch;
} debug;
-Unused.
+If the exit_reason is KVM_EXIT_DEBUG, then a vcpu is processing a debug event
+for which architecture specific information is returned.
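+
+A minimal handling sketch for the userspace run loop (handle_debug_exit
+is a hypothetical helper, e.g. one that forwards the event to a gdbstub):
+
+    if (run->exit_reason == KVM_EXIT_DEBUG)
+        handle_debug_exit(&run->debug.arch);
+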
/* KVM_EXIT_MMIO */
struct {
NIOS2 ARCHITECTURE
M: Ley Foon Tan <lftan@altera.com>
L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
-T: git git://git.rocketboards.org/linux-socfpga-next.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/lftan/nios2.git
S: Maintained
F: arch/nios2/
min-microvolt = <1100000>;
max-microvolt = <2700000>;
};
+
+ thermal-zones {
+ cpu_thermal: cpu-thermal {
+ cooling-maps {
+ map0 {
+					/* Corresponds to 500MHz at freq_table */
+ cooling-device = <&cpu0 5 5>;
+ };
+ map1 {
+					/* Corresponds to 200MHz at freq_table */
+ cooling-device = <&cpu0 8 8>;
+ };
+ };
+ };
+ };
};
&adc {
};
};
+&cpu0 {
+ cpu0-supply = <&buck2_reg>;
+};
+
&exynos_usbphy {
status = "okay";
};
min-microvolt = <1100000>;
max-microvolt = <2700000>;
};
+
+ thermal-zones {
+ cpu_thermal: cpu-thermal {
+ cooling-maps {
+ map0 {
+ /* Corresponds to 500MHz */
+ cooling-device = <&cpu0 5 5>;
+ };
+ map1 {
+ /* Corresponds to 200MHz */
+ cooling-device = <&cpu0 8 8>;
+ };
+ };
+ };
+ };
};
&adc {
};
};
+&cpu0 {
+ cpu0-supply = <&buck2_reg>;
+};
+
&exynos_usbphy {
status = "okay";
};
compatible = "arm,cortex-a7";
reg = <0>;
clock-frequency = <1000000000>;
+ clocks = <&cmu CLK_ARM_CLK>;
+ clock-names = "cpu";
+ #cooling-cells = <2>;
+
+ operating-points = <
+ 1000000 1150000
+ 900000 1112500
+ 800000 1075000
+ 700000 1037500
+ 600000 1000000
+ 500000 962500
+ 400000 925000
+ 300000 887500
+ 200000 850000
+ 100000 850000
+ >;
};
cpu1: cpu@1 {
clocks = <&clock CLK_JPEG>;
clock-names = "jpeg";
power-domains = <&pd_cam>;
+ iommus = <&sysmmu_jpeg>;
};
hdmi: hdmi@12D00000 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0xA00>;
+ clocks = <&clock CLK_ARM_CLK>;
+ clock-names = "cpu";
+ operating-points-v2 = <&cpu0_opp_table>;
cooling-min-level = <13>;
cooling-max-level = <7>;
#cooling-cells = <2>; /* min followed by max */
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0xA01>;
+ operating-points-v2 = <&cpu0_opp_table>;
+ };
+ };
+
+ cpu0_opp_table: opp_table0 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp00 {
+ opp-hz = /bits/ 64 <200000000>;
+ opp-microvolt = <900000>;
+ clock-latency-ns = <200000>;
+ };
+ opp01 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-microvolt = <900000>;
+ clock-latency-ns = <200000>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <400000000>;
+ opp-microvolt = <925000>;
+ clock-latency-ns = <200000>;
+ };
+ opp03 {
+ opp-hz = /bits/ 64 <500000000>;
+ opp-microvolt = <950000>;
+ clock-latency-ns = <200000>;
+ };
+ opp04 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <975000>;
+ clock-latency-ns = <200000>;
+ };
+ opp05 {
+ opp-hz = /bits/ 64 <700000000>;
+ opp-microvolt = <987500>;
+ clock-latency-ns = <200000>;
+ };
+ opp06 {
+ opp-hz = /bits/ 64 <800000000>;
+ opp-microvolt = <1000000>;
+ clock-latency-ns = <200000>;
+ };
+ opp07 {
+ opp-hz = /bits/ 64 <900000000>;
+ opp-microvolt = <1037500>;
+ clock-latency-ns = <200000>;
+ };
+ opp08 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt = <1087500>;
+ clock-latency-ns = <200000>;
+ };
+ opp09 {
+ opp-hz = /bits/ 64 <1100000000>;
+ opp-microvolt = <1137500>;
+ clock-latency-ns = <200000>;
+ };
+ opp10 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <1187500>;
+ clock-latency-ns = <200000>;
+ };
+ opp11 {
+ opp-hz = /bits/ 64 <1300000000>;
+ opp-microvolt = <1250000>;
+ clock-latency-ns = <200000>;
+ };
+ opp12 {
+ opp-hz = /bits/ 64 <1400000000>;
+ opp-microvolt = <1287500>;
+ clock-latency-ns = <200000>;
+ };
+ opp13 {
+ opp-hz = /bits/ 64 <1500000000>;
+ opp-microvolt = <1350000>;
+ clock-latency-ns = <200000>;
+ turbo-mode;
};
};
};
};
};
+&cpu0 {
+ cpu0-supply = <&buck2_reg>;
+};
+
/* RSTN signal for eMMC */
&sd1_cd {
samsung,pin-pud = <0>;
/dts-v1/;
#include "exynos4412-odroid-common.dtsi"
+#include <dt-bindings/gpio/gpio.h>
/ {
model = "Hardkernel ODROID-U3 board based on Exynos4412";
"Speakers", "SPKL",
"Speakers", "SPKR";
};
+
+&spi_1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi1_bus>;
+ cs-gpios = <&gpb 5 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
};
};
+&cpu0 {
+ cpu0-supply = <&buck2_reg>;
+};
+
&fimd {
pinctrl-0 = <&lcd_clk &lcd_data24 &pwm1_out>;
pinctrl-names = "default";
status = "okay";
};
+&cpu0 {
+ cpu0-supply = <&buck2_reg>;
+};
+
&csis_0 {
status = "okay";
vddcore-supply = <&ldo8_reg>;
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0xA00>;
+ clocks = <&clock CLK_ARM_CLK>;
+ clock-names = "cpu";
+ operating-points-v2 = <&cpu0_opp_table>;
cooling-min-level = <13>;
cooling-max-level = <7>;
#cooling-cells = <2>; /* min followed by max */
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0xA01>;
+ operating-points-v2 = <&cpu0_opp_table>;
};
cpu@A02 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0xA02>;
+ operating-points-v2 = <&cpu0_opp_table>;
};
cpu@A03 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0xA03>;
+ operating-points-v2 = <&cpu0_opp_table>;
+ };
+ };
+
+ cpu0_opp_table: opp_table0 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp00 {
+ opp-hz = /bits/ 64 <200000000>;
+ opp-microvolt = <900000>;
+ clock-latency-ns = <200000>;
+ };
+ opp01 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-microvolt = <900000>;
+ clock-latency-ns = <200000>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <400000000>;
+ opp-microvolt = <925000>;
+ clock-latency-ns = <200000>;
+ };
+ opp03 {
+ opp-hz = /bits/ 64 <500000000>;
+ opp-microvolt = <950000>;
+ clock-latency-ns = <200000>;
+ };
+ opp04 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <975000>;
+ clock-latency-ns = <200000>;
+ };
+ opp05 {
+ opp-hz = /bits/ 64 <700000000>;
+ opp-microvolt = <987500>;
+ clock-latency-ns = <200000>;
+ };
+ opp06 {
+ opp-hz = /bits/ 64 <800000000>;
+ opp-microvolt = <1000000>;
+ clock-latency-ns = <200000>;
+ };
+ opp07 {
+ opp-hz = /bits/ 64 <900000000>;
+ opp-microvolt = <1037500>;
+ clock-latency-ns = <200000>;
+ };
+ opp08 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt = <1087500>;
+ clock-latency-ns = <200000>;
+ };
+ opp09 {
+ opp-hz = /bits/ 64 <1100000000>;
+ opp-microvolt = <1137500>;
+ clock-latency-ns = <200000>;
+ };
+ opp10 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <1187500>;
+ clock-latency-ns = <200000>;
+ };
+ opp11 {
+ opp-hz = /bits/ 64 <1300000000>;
+ opp-microvolt = <1250000>;
+ clock-latency-ns = <200000>;
+ };
+ opp12 {
+ opp-hz = /bits/ 64 <1400000000>;
+ opp-microvolt = <1287500>;
+ clock-latency-ns = <200000>;
+ };
+ opp13 {
+ opp-hz = /bits/ 64 <1500000000>;
+ opp-microvolt = <1350000>;
+ clock-latency-ns = <200000>;
+ turbo-mode;
};
};
};
};
+&cpu0 {
+ cpu0-supply = <&buck2_reg>;
+};
+
&dp {
status = "okay";
samsung,color-space = <0>;
};
};
+&cpu0 {
+ cpu0-supply = <&buck2_reg>;
+};
+
&dp {
samsung,color-space = <0>;
samsung,dynamic-range = <0>;
};
};
+&cpu0 {
+ cpu0-supply = <&buck2_reg>;
+};
+
&dp {
status = "okay";
pinctrl-names = "default";
status = "okay";
samsung,spi-src-clk = <0>;
num-cs = <1>;
+ cs-gpios = <&gpa2 5 GPIO_ACTIVE_HIGH>;
};
&usbdrd_dwc3 {
};
};
+&cpu0 {
+ cpu0-supply = <&buck2_reg>;
+};
+
&dp {
status = "okay";
pinctrl-names = "default";
compatible = "arm,cortex-a15";
reg = <0>;
clock-frequency = <1700000000>;
+ clocks = <&clock CLK_ARM_CLK>;
+ clock-names = "cpu";
+ clock-latency = <140000>;
+
+ operating-points = <
+ 1700000 1300000
+ 1600000 1250000
+ 1500000 1225000
+ 1400000 1200000
+ 1300000 1150000
+ 1200000 1125000
+ 1100000 1100000
+ 1000000 1075000
+ 900000 1050000
+ 800000 1025000
+ 700000 1012500
+ 600000 1000000
+ 500000 975000
+ 400000 950000
+ 300000 937500
+ 200000 925000
+ >;
cooling-min-level = <15>;
cooling-max-level = <9>;
#cooling-cells = <2>; /* min followed by max */
--- /dev/null
+/*
+ * SAMSUNG EXYNOS5422 SoC cpu device tree source
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * The only difference between EXYNOS5422 and EXYNOS5800 is the CPU ordering:
+ * the EXYNOS5422 boots from a Cortex-A7 core while the EXYNOS5800 boots from
+ * a Cortex-A15 core.
+ *
+ * EXYNOS5422 based board files can include this file to provide the CPU
+ * ordering that boots a Cortex-A7 core as cpu0.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+&cpu0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x100>;
+ clock-frequency = <1000000000>;
+ cci-control-port = <&cci_control0>;
+};
+
+&cpu1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x101>;
+ clock-frequency = <1000000000>;
+ cci-control-port = <&cci_control0>;
+};
+
+&cpu2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x102>;
+ clock-frequency = <1000000000>;
+ cci-control-port = <&cci_control0>;
+};
+
+&cpu3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x103>;
+ clock-frequency = <1000000000>;
+ cci-control-port = <&cci_control0>;
+};
+
+&cpu4 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0x0>;
+ clock-frequency = <1800000000>;
+ cci-control-port = <&cci_control1>;
+};
+
+&cpu5 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0x1>;
+ clock-frequency = <1800000000>;
+ cci-control-port = <&cci_control1>;
+};
+
+&cpu6 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0x2>;
+ clock-frequency = <1800000000>;
+ cci-control-port = <&cci_control1>;
+};
+
+&cpu7 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0x3>;
+ clock-frequency = <1800000000>;
+ cci-control-port = <&cci_control1>;
+};
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/sound/samsung-i2s.h>
#include "exynos5800.dtsi"
+#include "exynos5422-cpus.dtsi"
#include "exynos5422-cpu-thermal.dtsi"
/ {
model = "CompuLab CM-QS600";
compatible = "qcom,apq8064-cm-qs600", "qcom,apq8064";
+ aliases {
+ serial0 = &gsbi7_serial;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
soc {
pinctrl@800000 {
i2c1_pins: i2c1 {
serial1 = &gsbi6_serial;
};
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
soc {
pinctrl@800000 {
card_detect: card_detect {
model = "Qualcomm APQ8074 Dragonboard";
compatible = "qcom,apq8074-dragonboard", "qcom,apq8074";
+ aliases {
+ serial0 = &blsp1_uart2;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
soc {
serial@f991e000 {
status = "ok";
model = "Qualcomm APQ8084/IFC6540";
compatible = "qcom,apq8084-ifc6540", "qcom,apq8084";
+ aliases {
+ serial0 = &blsp2_uart2;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
soc {
serial@f995e000 {
status = "okay";
model = "Qualcomm APQ 8084-MTP";
compatible = "qcom,apq8084-mtp", "qcom,apq8084";
+ aliases {
+ serial0 = &blsp2_uart2;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
soc {
serial@f995e000 {
status = "okay";
interrupts = <0 208 0>;
};
- serial@f995e000 {
+ blsp2_uart2: serial@f995e000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
reg = <0xf995e000 0x1000>;
interrupts = <0 114 0x0>;
model = "Qualcomm IPQ8064/AP148";
compatible = "qcom,ipq8064-ap148", "qcom,ipq8064";
+ aliases {
+ serial0 = &gsbi4_serial;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
reserved-memory {
#address-cells = <1>;
#size-cells = <1>;
syscon-tcsr = <&tcsr>;
- serial@16340000 {
+ gsbi4_serial: serial@16340000 {
compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
reg = <0x16340000 0x1000>,
<0x16300000 0x1000>;
model = "Qualcomm MSM8660 SURF";
compatible = "qcom,msm8660-surf", "qcom,msm8660";
+ aliases {
+ serial0 = &gsbi12_serial;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
soc {
gsbi@19c00000 {
status = "ok";
syscon-tcsr = <&tcsr>;
- serial@19c40000 {
+ gsbi12_serial: serial@19c40000 {
compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
reg = <0x19c40000 0x1000>,
<0x19c00000 0x1000>;
model = "Qualcomm MSM8960 CDP";
compatible = "qcom,msm8960-cdp", "qcom,msm8960";
+ aliases {
+ serial0 = &gsbi5_serial;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
soc {
gsbi@16400000 {
status = "ok";
syscon-tcsr = <&tcsr>;
- serial@16440000 {
+ gsbi5_serial: serial@16440000 {
compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
reg = <0x16440000 0x1000>,
<0x16400000 0x1000>;
model = "Sony Xperia Z1";
compatible = "sony,xperia-honami", "qcom,msm8974";
+ aliases {
+ serial0 = &blsp1_uart2;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
memory@0 {
reg = <0 0x40000000>, <0x40000000 0x40000000>;
device_type = "memory";
hwlocks = <&tcsr_mutex 3>;
};
- serial@f991e000 {
+ blsp1_uart2: serial@f991e000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
reg = <0xf991e000 0x1000>;
interrupts = <0 108 0x0>;
CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc mem=256M"
CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPUFREQ_DT=y
CONFIG_CPU_IDLE=y
CONFIG_ARM_EXYNOS_CPUIDLE=y
CONFIG_VFP=y
CONFIG_CHARGER_MAX77693=y
CONFIG_CHARGER_TPS65090=y
CONFIG_SENSORS_LM90=y
+CONFIG_SENSORS_NTC_THERMISTOR=y
CONFIG_SENSORS_PWM_FAN=y
CONFIG_SENSORS_INA2XX=y
CONFIG_THERMAL=y
CONFIG_SND_SOC=y
CONFIG_SND_SOC_SAMSUNG=y
CONFIG_SND_SOC_SNOW=y
+CONFIG_SND_SOC_ODROIDX2=y
+CONFIG_SND_SIMPLE_CARD=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_XHCI_HCD=y
CONFIG_POWER_RESET_RMOBILE=y
CONFIG_SENSORS_LM90=y
CONFIG_SENSORS_LM95245=y
+CONFIG_SENSORS_NTC_THERMISTOR=m
CONFIG_THERMAL=y
CONFIG_CPU_THERMAL=y
CONFIG_RCAR_THERMAL=y
CONFIG_REGULATOR_MAX8973=y
CONFIG_REGULATOR_MAX77686=y
CONFIG_REGULATOR_MAX77693=m
+CONFIG_REGULATOR_MAX77802=m
CONFIG_REGULATOR_PALMAS=y
+CONFIG_REGULATOR_PBIAS=y
CONFIG_REGULATOR_PWM=m
CONFIG_REGULATOR_S2MPS11=y
CONFIG_REGULATOR_S5M8767=y
CONFIG_AB8500_USB=y
CONFIG_KEYSTONE_USB_PHY=y
CONFIG_OMAP_USB3=y
-CONFIG_SAMSUNG_USB2PHY=y
-CONFIG_SAMSUNG_USB3PHY=y
CONFIG_USB_GPIO_VBUS=y
CONFIG_USB_ISP1301=y
CONFIG_USB_MXS_PHY=y
CONFIG_TI_AEMIF=y
CONFIG_IIO=y
CONFIG_AT91_ADC=m
+CONFIG_EXYNOS_ADC=m
CONFIG_XILINX_XADC=y
CONFIG_AK8975=y
CONFIG_PWM=y
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+static inline void kvm_arm_init_debug(void) {}
+static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
+
#endif /* __ARM_KVM_HOST_H__ */
unsigned long __pfn_to_mfn(unsigned long pfn);
extern struct rb_root phys_to_mach;
-static inline unsigned long pfn_to_mfn(unsigned long pfn)
+/* Pseudo-physical <-> Guest conversion */
+static inline unsigned long pfn_to_gfn(unsigned long pfn)
+{
+ return pfn;
+}
+
+static inline unsigned long gfn_to_pfn(unsigned long gfn)
+{
+ return gfn;
+}
+
+/* Pseudo-physical <-> BUS conversion */
+static inline unsigned long pfn_to_bfn(unsigned long pfn)
{
unsigned long mfn;
return pfn;
}
-static inline unsigned long mfn_to_pfn(unsigned long mfn)
+static inline unsigned long bfn_to_pfn(unsigned long bfn)
{
- return mfn;
+ return bfn;
}
-#define mfn_to_local_pfn(mfn) mfn_to_pfn(mfn)
+#define bfn_to_local_pfn(bfn) bfn_to_pfn(bfn)
-/* VIRT <-> MACHINE conversion */
-#define virt_to_mfn(v) (pfn_to_mfn(virt_to_pfn(v)))
-#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
+/* VIRT <-> GUEST conversion */
+#define virt_to_gfn(v) (pfn_to_gfn(virt_to_pfn(v)))
+#define gfn_to_virt(m) (__va(gfn_to_pfn(m) << PAGE_SHIFT))
/* Only used in PV code. But ARM guests are always HVM. */
static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
bool xen_arch_need_swiotlb(struct device *dev,
unsigned long pfn,
- unsigned long mfn);
+ unsigned long bfn);
unsigned long xen_get_swiotlb_free_pages(unsigned int order);
#endif /* _ASM_ARM_XEN_PAGE_H */
if (ret)
goto out_free_stage2_pgd;
+ kvm_vgic_early_init(kvm);
kvm_timer_init(kvm);
/* Mark the initial VMID generation invalid */
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
+ kvm_vgic_vcpu_early_init(vcpu);
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
/* Set up the timer */
kvm_timer_vcpu_init(vcpu);
+ kvm_arm_reset_debug_ptr(vcpu);
+
return 0;
}
kvm_arm_set_running_vcpu(NULL);
}
-int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
- struct kvm_guest_debug *dbg)
-{
- return -EINVAL;
-}
-
-
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
if (vcpu->arch.pause)
vcpu_pause(vcpu);
- kvm_vgic_flush_hwstate(vcpu);
+ /*
+ * Disarming the background timer must be done in a
+ * preemptible context, as this call may sleep.
+ */
kvm_timer_flush_hwstate(vcpu);
+ /*
+ * Preparing the interrupts to be injected also
+ * involves poking the GIC, which must be done in a
+ * non-preemptible context.
+ */
preempt_disable();
+ kvm_vgic_flush_hwstate(vcpu);
+
local_irq_disable();
/*
if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) {
local_irq_enable();
+ kvm_vgic_sync_hwstate(vcpu);
preempt_enable();
kvm_timer_sync_hwstate(vcpu);
- kvm_vgic_sync_hwstate(vcpu);
continue;
}
+ kvm_arm_setup_debug(vcpu);
+
/**************************************************************
* Enter the guest
*/
* Back from guest
*************************************************************/
+ kvm_arm_clear_debug(vcpu);
+
/*
* We may have taken a host interrupt in HYP mode (ie
* while executing the guest). This interrupt is still
*/
kvm_guest_exit();
trace_kvm_exit(kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
- preempt_enable();
+ kvm_vgic_sync_hwstate(vcpu);
+
+ preempt_enable();
kvm_timer_sync_hwstate(vcpu);
- kvm_vgic_sync_hwstate(vcpu);
ret = handle_exit(vcpu, run, ret);
}
vector_ptr = (unsigned long)__kvm_hyp_vector;
__cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
+
+ kvm_arm_init_debug();
}
static int hyp_init_cpu_notify(struct notifier_block *self,
{
return -EINVAL;
}
+
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug *dbg)
+{
+ return -EINVAL;
+}
@ Check syndrome register
mrc p15, 4, r1, c5, c2, 0 @ HSR
lsr r0, r1, #HSR_EC_SHIFT
-#ifdef CONFIG_VFPv3
- cmp r0, #HSR_EC_CP_0_13
- beq switch_to_guest_vfp
-#endif
cmp r0, #HSR_EC_HVC
bne guest_trap @ Not HVC instr.
cmp r2, #0
bne guest_trap @ Guest called HVC
-host_switch_to_hyp:
+	/*
+	 * Getting here means the host called HVC, so we shift parameters
+	 * and branch to the Hyp function.
+	 */
pop {r0, r1, r2}
/* Check for __hyp_get_vectors */
@ Check if we need the fault information
lsr r1, r1, #HSR_EC_SHIFT
+#ifdef CONFIG_VFPv3
+ cmp r1, #HSR_EC_CP_0_13
+ beq switch_to_guest_vfp
+#endif
cmp r1, #HSR_EC_IABT
mrceq p15, 4, r2, c6, c0, 2 @ HIFAR
beq 2f
*/
#ifdef CONFIG_VFPv3
switch_to_guest_vfp:
- load_vcpu @ Load VCPU pointer to r0
push {r3-r7}
@ NEON/VFP used. Turn on VFP access.
kvm_reset_coprocs(vcpu);
/* Reset arch_timer context */
- kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
-
- return 0;
+ return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
}
select ARM_AMBA
select ARM_GIC
select COMMON_CLK_SAMSUNG
+ select EXYNOS_THERMAL
select HAVE_ARM_SCU if SMP
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C2410_WATCHDOG if WATCHDOG
select PM_GENERIC_DOMAINS if PM
select S5P_DEV_MFC
select SRAM
+ select THERMAL
select MFD_SYSCON
help
Support for SAMSUNG EXYNOS SoCs (EXYNOS4/5)
}
static const struct of_device_id exynos_cpufreq_matches[] = {
+ { .compatible = "samsung,exynos3250", .data = "cpufreq-dt" },
{ .compatible = "samsung,exynos4210", .data = "cpufreq-dt" },
+ { .compatible = "samsung,exynos4212", .data = "cpufreq-dt" },
+ { .compatible = "samsung,exynos4412", .data = "cpufreq-dt" },
+ { .compatible = "samsung,exynos5250", .data = "cpufreq-dt" },
{ /* sentinel */ }
};
static __initdata struct device_node *xen_node;
-int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
unsigned long addr,
- xen_pfn_t *mfn, int nr,
+ xen_pfn_t *gfn, int nr,
int *err_ptr, pgprot_t prot,
unsigned domid,
struct page **pages)
{
- return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr,
+ return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
prot, domid, pages);
}
-EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
+EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
/* Not used by XENFEAT_auto_translated guests. */
-int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
unsigned long addr,
- xen_pfn_t mfn, int nr,
+ xen_pfn_t gfn, int nr,
pgprot_t prot, unsigned domid,
struct page **pages)
{
return -ENOSYS;
}
-EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
+EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
-int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
int nr, struct page **pages)
{
return xen_xlate_unmap_gfn_range(vma, nr, pages);
}
-EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
+EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
static void xen_percpu_init(void)
{
bool xen_arch_need_swiotlb(struct device *dev,
unsigned long pfn,
- unsigned long mfn)
+ unsigned long bfn)
{
- return (!hypercall_cflush && (pfn != mfn) && !is_device_dma_coherent(dev));
+ return (!hypercall_cflush && (pfn != bfn) && !is_device_dma_coherent(dev));
}
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
#ifndef __ASM_HW_BREAKPOINT_H
#define __ASM_HW_BREAKPOINT_H
+#include <asm/cputype.h>
+
#ifdef __KERNEL__
struct arch_hw_breakpoint_ctrl {
extern struct pmu perf_ops_bp;
+/* Determine number of BRP registers available. */
+static inline int get_num_brps(void)
+{
+ return ((read_cpuid(ID_AA64DFR0_EL1) >> 12) & 0xf) + 1;
+}
+
+/* Determine number of WRP registers available. */
+static inline int get_num_wrps(void)
+{
+ return ((read_cpuid(ID_AA64DFR0_EL1) >> 20) & 0xf) + 1;
+}
+
#endif /* __KERNEL__ */
#endif	/* __ASM_HW_BREAKPOINT_H */
#define HSTR_EL2_TTEE (1 << 16)
#define HSTR_EL2_T(x) (1 << x)
+/* Hyp Coprocessor Trap Register Shifts */
+#define CPTR_EL2_TFP_SHIFT 10
+
/* Hyp Coprocessor Trap Register */
#define CPTR_EL2_TCPAC (1 << 31)
#define CPTR_EL2_TTA (1 << 20)
-#define CPTR_EL2_TFP (1 << 10)
+#define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT)
/* Hyp Debug Configuration Register bits */
#define MDCR_EL2_TDRA (1 << 11)
#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */
#define PAR_EL1 21 /* Physical Address Register */
#define MDSCR_EL1 22 /* Monitor Debug System Control Register */
-#define DBGBCR0_EL1 23 /* Debug Breakpoint Control Registers (0-15) */
-#define DBGBCR15_EL1 38
-#define DBGBVR0_EL1 39 /* Debug Breakpoint Value Registers (0-15) */
-#define DBGBVR15_EL1 54
-#define DBGWCR0_EL1 55 /* Debug Watchpoint Control Registers (0-15) */
-#define DBGWCR15_EL1 70
-#define DBGWVR0_EL1 71 /* Debug Watchpoint Value Registers (0-15) */
-#define DBGWVR15_EL1 86
-#define MDCCINT_EL1 87 /* Monitor Debug Comms Channel Interrupt Enable Reg */
+#define MDCCINT_EL1 23 /* Monitor Debug Comms Channel Interrupt Enable Reg */
/* 32bit specific registers. Keep them at the end of the range */
-#define DACR32_EL2 88 /* Domain Access Control Register */
-#define IFSR32_EL2 89 /* Instruction Fault Status Register */
-#define FPEXC32_EL2 90 /* Floating-Point Exception Control Register */
-#define DBGVCR32_EL2 91 /* Debug Vector Catch Register */
-#define TEECR32_EL1 92 /* ThumbEE Configuration Register */
-#define TEEHBR32_EL1 93 /* ThumbEE Handler Base Register */
-#define NR_SYS_REGS 94
+#define DACR32_EL2 24 /* Domain Access Control Register */
+#define IFSR32_EL2 25 /* Instruction Fault Status Register */
+#define FPEXC32_EL2 26 /* Floating-Point Exception Control Register */
+#define DBGVCR32_EL2 27 /* Debug Vector Catch Register */
+#define TEECR32_EL1 28 /* ThumbEE Configuration Register */
+#define TEEHBR32_EL1 29 /* ThumbEE Handler Base Register */
+#define NR_SYS_REGS 30
/* 32bit mapping */
#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
extern u64 __vgic_v3_get_ich_vtr_el2(void);
+extern u32 __kvm_get_mdcr_el2(void);
+
#endif
#endif /* __ARM_KVM_ASM_H__ */
/* HYP configuration */
u64 hcr_el2;
+ u32 mdcr_el2;
/* Exception Information */
struct kvm_vcpu_fault_info fault;
- /* Debug state */
+ /* Guest debug state */
u64 debug_flags;
+ /*
+ * We maintain more than a single set of debug registers to support
+ * debugging the guest from the host and to maintain separate host and
+ * guest state during world switches. vcpu_debug_state are the debug
+ * registers of the vcpu as the guest sees them. host_debug_state are
+ * the host registers which are saved and restored during
+	 * world switches. external_debug_state contains the debug register
+	 * values we want to use when debugging the guest; these are set via
+	 * the KVM_SET_GUEST_DEBUG ioctl.
+ *
+ * debug_ptr points to the set of debug registers that should be loaded
+ * onto the hardware when running the guest.
+ */
+ struct kvm_guest_debug_arch *debug_ptr;
+ struct kvm_guest_debug_arch vcpu_debug_state;
+ struct kvm_guest_debug_arch external_debug_state;
+
/* Pointer to host CPU context */
kvm_cpu_context_t *host_cpu_context;
+ struct kvm_guest_debug_arch host_debug_state;
/* VGIC state */
struct vgic_cpu vgic_cpu;
* here.
*/
+ /*
+ * Guest registers we preserve during guest debugging.
+ *
+ * These shadow registers are updated by the kvm_handle_sys_reg
+ * trap handler if the guest accesses or updates them while we
+ * are using guest debug.
+ */
+ struct {
+ u32 mdscr_el1;
+ } guest_debug_preserved;
+
/* Don't run the guest */
bool pause;
hyp_stack_ptr, vector_ptr);
}
-struct vgic_sr_vectors {
- void *save_vgic;
- void *restore_vgic;
-};
-
static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+void kvm_arm_init_debug(void);
+void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
+void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
+void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
+
#endif /* __ARM64_KVM_HOST_H__ */
struct user_fpsimd_state fp_regs;
};
-/* Supported Processor Types */
+/*
+ * Supported CPU Targets - Adding a new target type is not recommended,
+ * unless there are some special registers not supported by the
+ * genericv8 sysreg table.
+ */
#define KVM_ARM_TARGET_AEM_V8 0
#define KVM_ARM_TARGET_FOUNDATION_V8 1
#define KVM_ARM_TARGET_CORTEX_A57 2
#define KVM_ARM_TARGET_XGENE_POTENZA 3
#define KVM_ARM_TARGET_CORTEX_A53 4
+/* Generic ARM v8 target */
+#define KVM_ARM_TARGET_GENERIC_V8 5
-#define KVM_ARM_NUM_TARGETS 5
+#define KVM_ARM_NUM_TARGETS 6
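+
+/*
+ * Illustrative userspace flow (vm_fd and vcpu_fd are placeholder file
+ * descriptors): query the preferred target for this host, then
+ * initialise the vcpu with it:
+ *
+ *	struct kvm_vcpu_init init;
+ *	ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init);
+ *	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
+ */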
/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
#define KVM_ARM_DEVICE_TYPE_SHIFT 0
struct kvm_fpu {
};
+/*
+ * See v8 ARM ARM D7.3: Debug Registers
+ *
+ * The architectural limit is 16 debug registers of each type although
+ * in practice there are usually fewer (see ID_AA64DFR0_EL1).
+ *
+ * Although the control registers are architecturally defined as 32
+ * bits wide we use a 64 bit structure here to keep parity with
+ * KVM_GET/SET_ONE_REG behaviour which treats all system registers as
+ * 64 bit values. It also allows for the possibility of the
+ * architecture expanding the control registers without having to
+ * change the userspace ABI.
+ */
+#define KVM_ARM_MAX_DBG_REGS 16
struct kvm_guest_debug_arch {
+ __u64 dbg_bcr[KVM_ARM_MAX_DBG_REGS];
+ __u64 dbg_bvr[KVM_ARM_MAX_DBG_REGS];
+ __u64 dbg_wcr[KVM_ARM_MAX_DBG_REGS];
+ __u64 dbg_wvr[KVM_ARM_MAX_DBG_REGS];
};
struct kvm_debug_exit_arch {
+ __u32 hsr;
+ __u64 far; /* used for watchpoints */
};
+/*
+ * Architecture specific defines for kvm_guest_debug->control
+ */
+
+#define KVM_GUESTDBG_USE_SW_BP (1 << 16)
+#define KVM_GUESTDBG_USE_HW (1 << 17)
+
struct kvm_sync_regs {
};
DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2));
DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2));
DEFINE(VCPU_DEBUG_FLAGS, offsetof(struct kvm_vcpu, arch.debug_flags));
+ DEFINE(VCPU_DEBUG_PTR, offsetof(struct kvm_vcpu, arch.debug_ptr));
+ DEFINE(DEBUG_BCR, offsetof(struct kvm_guest_debug_arch, dbg_bcr));
+ DEFINE(DEBUG_BVR, offsetof(struct kvm_guest_debug_arch, dbg_bvr));
+ DEFINE(DEBUG_WCR, offsetof(struct kvm_guest_debug_arch, dbg_wcr));
+ DEFINE(DEBUG_WVR, offsetof(struct kvm_guest_debug_arch, dbg_wvr));
DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
+ DEFINE(VCPU_MDCR_EL2, offsetof(struct kvm_vcpu, arch.mdcr_el2));
DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
+ DEFINE(VCPU_HOST_DEBUG_STATE, offsetof(struct kvm_vcpu, arch.host_debug_state));
DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff));
DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
- DEFINE(VGIC_SAVE_FN, offsetof(struct vgic_sr_vectors, save_vgic));
- DEFINE(VGIC_RESTORE_FN, offsetof(struct vgic_sr_vectors, restore_vgic));
DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
static int core_num_brps;
static int core_num_wrps;
-/* Determine number of BRP registers available. */
-static int get_num_brps(void)
-{
- return ((read_cpuid(ID_AA64DFR0_EL1) >> 12) & 0xf) + 1;
-}
-
-/* Determine number of WRP registers available. */
-static int get_num_wrps(void)
-{
- return ((read_cpuid(ID_AA64DFR0_EL1) >> 20) & 0xf) + 1;
-}
-
int hw_breakpoint_slots(int type)
{
/*
kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
-kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
+kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o
--- /dev/null
+/*
+ * Debug and Guest Debug support
+ *
+ * Copyright (C) 2015 - Linaro Ltd
+ * Author: Alex Bennée <alex.bennee@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/hw_breakpoint.h>
+
+#include <asm/debug-monitors.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_emulate.h>
+
+#include "trace.h"
+
+/* These are the bits of MDSCR_EL1 we may manipulate */
+#define MDSCR_EL1_DEBUG_MASK (DBG_MDSCR_SS | \
+ DBG_MDSCR_KDE | \
+ DBG_MDSCR_MDE)
+
+static DEFINE_PER_CPU(u32, mdcr_el2);
+
+/**
+ * save/restore_guest_debug_regs
+ *
+ * For some debug operations we need to tweak some guest registers. As
+ * a result we need to save the state of those registers before we
+ * make those modifications.
+ *
+ * Guest access to MDSCR_EL1 is trapped by the hypervisor and handled
+ * after we have restored the preserved value to the main context.
+ */
+static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.guest_debug_preserved.mdscr_el1 = vcpu_sys_reg(vcpu, MDSCR_EL1);
+
+ trace_kvm_arm_set_dreg32("Saved MDSCR_EL1",
+ vcpu->arch.guest_debug_preserved.mdscr_el1);
+}
+
+static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
+{
+ vcpu_sys_reg(vcpu, MDSCR_EL1) = vcpu->arch.guest_debug_preserved.mdscr_el1;
+
+ trace_kvm_arm_set_dreg32("Restored MDSCR_EL1",
+ vcpu_sys_reg(vcpu, MDSCR_EL1));
+}
+
+/**
+ * kvm_arm_init_debug - grab what we need for debug
+ *
+ * Currently the sole task of this function is to retrieve the initial
+ * value of mdcr_el2 so we can preserve MDCR_EL2.HPMN which has
+ * presumably been set up by some knowledgeable bootcode.
+ *
+ * It is called once per-cpu during CPU hyp initialisation.
+ */
+
+void kvm_arm_init_debug(void)
+{
+ __this_cpu_write(mdcr_el2, kvm_call_hyp(__kvm_get_mdcr_el2));
+}
+
+/**
+ * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
+ */
+
+void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
+}
+
+/**
+ * kvm_arm_setup_debug - set up debug state before each guest entry
+ *
+ * @vcpu: the vcpu pointer
+ *
+ * This is called before each entry into the hypervisor to set up any
+ * debug related registers. Currently this just ensures we will trap
+ * access to:
+ * - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
+ * - Debug ROM Address (MDCR_EL2_TDRA)
+ * - OS related registers (MDCR_EL2_TDOSA)
+ *
+ * Additionally, KVM only traps guest accesses to the debug registers if
+ * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
+ * flag on vcpu->arch.debug_flags). Since the guest must not interfere
+ * with the hardware state when debugging the guest, we must ensure that
+ * trapping is enabled whenever we are debugging the guest using the
+ * debug registers.
+ */
+
+void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
+{
+ bool trap_debug = !(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY);
+
+ trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);
+
+ vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
+ vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
+ MDCR_EL2_TPMCR |
+ MDCR_EL2_TDRA |
+ MDCR_EL2_TDOSA);
+
+ /* Is Guest debugging in effect? */
+ if (vcpu->guest_debug) {
+ /* Route all software debug exceptions to EL2 */
+ vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
+
+ /* Save guest debug state */
+ save_guest_debug_regs(vcpu);
+
+ /*
+ * Single Step (ARM ARM D2.12.3 The software step state
+ * machine)
+ *
+ * If we are doing Single Step we need to manipulate
+ * the guest's MDSCR_EL1.SS and PSTATE.SS. Once the
+ * step has occurred the hypervisor will trap the
+ * debug exception and we return to userspace.
+ *
+ * If the guest attempts to single step its userspace
+ * we would have to deal with a trapped exception
+			 * while in the guest kernel. Because this would be
+			 * hard to unwind, we suppress the guest's ability to
+			 * do so by masking MDSCR_EL1.SS.
+ *
+ * This confuses guest debuggers which use
+ * single-step behind the scenes but everything
+ * returns to normal once the host is no longer
+ * debugging the system.
+ */
+ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+ *vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
+ vcpu_sys_reg(vcpu, MDSCR_EL1) |= DBG_MDSCR_SS;
+ } else {
+ vcpu_sys_reg(vcpu, MDSCR_EL1) &= ~DBG_MDSCR_SS;
+ }
+
+ trace_kvm_arm_set_dreg32("SPSR_EL2", *vcpu_cpsr(vcpu));
+
+ /*
+ * HW Breakpoints and watchpoints
+ *
+ * We simply switch the debug_ptr to point to our new
+ * external_debug_state which has been populated by the
+ * debug ioctl. The existing KVM_ARM64_DEBUG_DIRTY
+ * mechanism ensures the registers are updated on the
+ * world switch.
+ */
+ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
+ /* Enable breakpoints/watchpoints */
+ vcpu_sys_reg(vcpu, MDSCR_EL1) |= DBG_MDSCR_MDE;
+
+ vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
+ vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+ trap_debug = true;
+
+ trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
+ &vcpu->arch.debug_ptr->dbg_bcr[0],
+ &vcpu->arch.debug_ptr->dbg_bvr[0]);
+
+ trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
+ &vcpu->arch.debug_ptr->dbg_wcr[0],
+ &vcpu->arch.debug_ptr->dbg_wvr[0]);
+ }
+ }
+
+ BUG_ON(!vcpu->guest_debug &&
+ vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);
+
+ /* Trap debug register access */
+ if (trap_debug)
+ vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
+
+ trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
+ trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_sys_reg(vcpu, MDSCR_EL1));
+}
+
+void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
+{
+ trace_kvm_arm_clear_debug(vcpu->guest_debug);
+
+ if (vcpu->guest_debug) {
+ restore_guest_debug_regs(vcpu);
+
+ /*
+ * If we were using HW debug we need to restore the
+ * debug_ptr to the guest debug state.
+ */
+ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
+ kvm_arm_reset_debug_ptr(vcpu);
+
+ trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
+ &vcpu->arch.debug_ptr->dbg_bcr[0],
+ &vcpu->arch.debug_ptr->dbg_bvr[0]);
+
+ trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
+ &vcpu->arch.debug_ptr->dbg_wcr[0],
+ &vcpu->arch.debug_ptr->dbg_wvr[0]);
+ }
+ }
+}
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
+#include "trace.h"
+
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ NULL }
};
break;
};
- return -EINVAL;
+ /* Return a default generic target */
+ return KVM_ARM_TARGET_GENERIC_V8;
}
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
return -EINVAL;
}
+
+#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
+ KVM_GUESTDBG_USE_SW_BP | \
+ KVM_GUESTDBG_USE_HW | \
+ KVM_GUESTDBG_SINGLESTEP)
+
+/**
+ * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
+ * @vcpu: the vcpu pointer
+ * @dbg: the ioctl data buffer
+ *
+ * This sets up and enables the VM for guest debugging. Userspace
+ * passes in a control flag to enable different debug types and
+ * potentially other architecture specific information in the rest of
+ * the structure.
+ */
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug *dbg)
+{
+ trace_kvm_set_guest_debug(vcpu, dbg->control);
+
+ if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
+ return -EINVAL;
+
+ if (dbg->control & KVM_GUESTDBG_ENABLE) {
+ vcpu->guest_debug = dbg->control;
+
+ /* Hardware assisted Break and Watch points */
+ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
+ vcpu->arch.external_debug_state = dbg->arch;
+ }
+
+ } else {
+ /* If not enabled clear all flags */
+ vcpu->guest_debug = 0;
+ }
+ return 0;
+}
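+
+/*
+ * Example userspace usage (illustrative only; vcpu_fd stands for the
+ * vcpu file descriptor):
+ *
+ *	struct kvm_guest_debug dbg = {
+ *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP,
+ *	};
+ *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
+ */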
return 1;
}
+/**
+ * kvm_handle_guest_debug - handle a guest debug exception
+ *
+ * @vcpu: the vcpu pointer
+ * @run: access to the kvm_run structure for results
+ *
+ * We route all debug exceptions through the same handler. If both the
+ * guest and host are using the same debug facilities it will be up to
+ * userspace to re-inject the correct exception for guest delivery.
+ *
+ * @return: 0 (while setting run->exit_reason), -1 for error
+ */
+static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ u32 hsr = kvm_vcpu_get_hsr(vcpu);
+ int ret = 0;
+
+ run->exit_reason = KVM_EXIT_DEBUG;
+ run->debug.arch.hsr = hsr;
+
+ switch (hsr >> ESR_ELx_EC_SHIFT) {
+ case ESR_ELx_EC_WATCHPT_LOW:
+ run->debug.arch.far = vcpu->arch.fault.far_el2;
+ /* fall through */
+ case ESR_ELx_EC_SOFTSTP_LOW:
+ case ESR_ELx_EC_BREAKPT_LOW:
+ case ESR_ELx_EC_BKPT32:
+ case ESR_ELx_EC_BRK64:
+ break;
+ default:
+ kvm_err("%s: un-handled case hsr: %#08x\n",
+ __func__, (unsigned int) hsr);
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
static exit_handle_fn arm_exit_handlers[] = {
[ESR_ELx_EC_WFx] = kvm_handle_wfx,
[ESR_ELx_EC_CP15_32] = kvm_handle_cp15_32,
[ESR_ELx_EC_SYS64] = kvm_handle_sys_reg,
[ESR_ELx_EC_IABT_LOW] = kvm_handle_guest_abort,
[ESR_ELx_EC_DABT_LOW] = kvm_handle_guest_abort,
+ [ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
+ [ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug,
+ [ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
+ [ESR_ELx_EC_BKPT32] = kvm_handle_guest_debug,
+ [ESR_ELx_EC_BRK64] = kvm_handle_guest_debug,
};
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
stp x24, x25, [x3, #160]
.endm
-.macro save_debug
- // x2: base address for cpu context
- // x3: tmp register
-
- mrs x26, id_aa64dfr0_el1
- ubfx x24, x26, #12, #4 // Extract BRPs
- ubfx x25, x26, #20, #4 // Extract WRPs
- mov w26, #15
- sub w24, w26, w24 // How many BPs to skip
- sub w25, w26, w25 // How many WPs to skip
-
- add x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
-
- adr x26, 1f
- add x26, x26, x24, lsl #2
- br x26
-1:
- mrs x20, dbgbcr15_el1
- mrs x19, dbgbcr14_el1
- mrs x18, dbgbcr13_el1
- mrs x17, dbgbcr12_el1
- mrs x16, dbgbcr11_el1
- mrs x15, dbgbcr10_el1
- mrs x14, dbgbcr9_el1
- mrs x13, dbgbcr8_el1
- mrs x12, dbgbcr7_el1
- mrs x11, dbgbcr6_el1
- mrs x10, dbgbcr5_el1
- mrs x9, dbgbcr4_el1
- mrs x8, dbgbcr3_el1
- mrs x7, dbgbcr2_el1
- mrs x6, dbgbcr1_el1
- mrs x5, dbgbcr0_el1
-
- adr x26, 1f
- add x26, x26, x24, lsl #2
- br x26
-
-1:
- str x20, [x3, #(15 * 8)]
- str x19, [x3, #(14 * 8)]
- str x18, [x3, #(13 * 8)]
- str x17, [x3, #(12 * 8)]
- str x16, [x3, #(11 * 8)]
- str x15, [x3, #(10 * 8)]
- str x14, [x3, #(9 * 8)]
- str x13, [x3, #(8 * 8)]
- str x12, [x3, #(7 * 8)]
- str x11, [x3, #(6 * 8)]
- str x10, [x3, #(5 * 8)]
- str x9, [x3, #(4 * 8)]
- str x8, [x3, #(3 * 8)]
- str x7, [x3, #(2 * 8)]
- str x6, [x3, #(1 * 8)]
- str x5, [x3, #(0 * 8)]
-
- add x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
-
- adr x26, 1f
- add x26, x26, x24, lsl #2
- br x26
+.macro save_debug type
+ // x4: pointer to register set
+ // x5: number of registers to skip
+ // x6..x22 trashed
+
+ adr x22, 1f
+ add x22, x22, x5, lsl #2
+ br x22
1:
- mrs x20, dbgbvr15_el1
- mrs x19, dbgbvr14_el1
- mrs x18, dbgbvr13_el1
- mrs x17, dbgbvr12_el1
- mrs x16, dbgbvr11_el1
- mrs x15, dbgbvr10_el1
- mrs x14, dbgbvr9_el1
- mrs x13, dbgbvr8_el1
- mrs x12, dbgbvr7_el1
- mrs x11, dbgbvr6_el1
- mrs x10, dbgbvr5_el1
- mrs x9, dbgbvr4_el1
- mrs x8, dbgbvr3_el1
- mrs x7, dbgbvr2_el1
- mrs x6, dbgbvr1_el1
- mrs x5, dbgbvr0_el1
-
- adr x26, 1f
- add x26, x26, x24, lsl #2
- br x26
-
-1:
- str x20, [x3, #(15 * 8)]
- str x19, [x3, #(14 * 8)]
- str x18, [x3, #(13 * 8)]
- str x17, [x3, #(12 * 8)]
- str x16, [x3, #(11 * 8)]
- str x15, [x3, #(10 * 8)]
- str x14, [x3, #(9 * 8)]
- str x13, [x3, #(8 * 8)]
- str x12, [x3, #(7 * 8)]
- str x11, [x3, #(6 * 8)]
- str x10, [x3, #(5 * 8)]
- str x9, [x3, #(4 * 8)]
- str x8, [x3, #(3 * 8)]
- str x7, [x3, #(2 * 8)]
- str x6, [x3, #(1 * 8)]
- str x5, [x3, #(0 * 8)]
-
- add x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
-
- adr x26, 1f
- add x26, x26, x25, lsl #2
- br x26
-1:
- mrs x20, dbgwcr15_el1
- mrs x19, dbgwcr14_el1
- mrs x18, dbgwcr13_el1
- mrs x17, dbgwcr12_el1
- mrs x16, dbgwcr11_el1
- mrs x15, dbgwcr10_el1
- mrs x14, dbgwcr9_el1
- mrs x13, dbgwcr8_el1
- mrs x12, dbgwcr7_el1
- mrs x11, dbgwcr6_el1
- mrs x10, dbgwcr5_el1
- mrs x9, dbgwcr4_el1
- mrs x8, dbgwcr3_el1
- mrs x7, dbgwcr2_el1
- mrs x6, dbgwcr1_el1
- mrs x5, dbgwcr0_el1
-
- adr x26, 1f
- add x26, x26, x25, lsl #2
- br x26
-
-1:
- str x20, [x3, #(15 * 8)]
- str x19, [x3, #(14 * 8)]
- str x18, [x3, #(13 * 8)]
- str x17, [x3, #(12 * 8)]
- str x16, [x3, #(11 * 8)]
- str x15, [x3, #(10 * 8)]
- str x14, [x3, #(9 * 8)]
- str x13, [x3, #(8 * 8)]
- str x12, [x3, #(7 * 8)]
- str x11, [x3, #(6 * 8)]
- str x10, [x3, #(5 * 8)]
- str x9, [x3, #(4 * 8)]
- str x8, [x3, #(3 * 8)]
- str x7, [x3, #(2 * 8)]
- str x6, [x3, #(1 * 8)]
- str x5, [x3, #(0 * 8)]
-
- add x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
-
- adr x26, 1f
- add x26, x26, x25, lsl #2
- br x26
-1:
- mrs x20, dbgwvr15_el1
- mrs x19, dbgwvr14_el1
- mrs x18, dbgwvr13_el1
- mrs x17, dbgwvr12_el1
- mrs x16, dbgwvr11_el1
- mrs x15, dbgwvr10_el1
- mrs x14, dbgwvr9_el1
- mrs x13, dbgwvr8_el1
- mrs x12, dbgwvr7_el1
- mrs x11, dbgwvr6_el1
- mrs x10, dbgwvr5_el1
- mrs x9, dbgwvr4_el1
- mrs x8, dbgwvr3_el1
- mrs x7, dbgwvr2_el1
- mrs x6, dbgwvr1_el1
- mrs x5, dbgwvr0_el1
-
- adr x26, 1f
- add x26, x26, x25, lsl #2
- br x26
-
+ mrs x21, \type\()15_el1
+ mrs x20, \type\()14_el1
+ mrs x19, \type\()13_el1
+ mrs x18, \type\()12_el1
+ mrs x17, \type\()11_el1
+ mrs x16, \type\()10_el1
+ mrs x15, \type\()9_el1
+ mrs x14, \type\()8_el1
+ mrs x13, \type\()7_el1
+ mrs x12, \type\()6_el1
+ mrs x11, \type\()5_el1
+ mrs x10, \type\()4_el1
+ mrs x9, \type\()3_el1
+ mrs x8, \type\()2_el1
+ mrs x7, \type\()1_el1
+ mrs x6, \type\()0_el1
+
+ adr x22, 1f
+ add x22, x22, x5, lsl #2
+ br x22
1:
- str x20, [x3, #(15 * 8)]
- str x19, [x3, #(14 * 8)]
- str x18, [x3, #(13 * 8)]
- str x17, [x3, #(12 * 8)]
- str x16, [x3, #(11 * 8)]
- str x15, [x3, #(10 * 8)]
- str x14, [x3, #(9 * 8)]
- str x13, [x3, #(8 * 8)]
- str x12, [x3, #(7 * 8)]
- str x11, [x3, #(6 * 8)]
- str x10, [x3, #(5 * 8)]
- str x9, [x3, #(4 * 8)]
- str x8, [x3, #(3 * 8)]
- str x7, [x3, #(2 * 8)]
- str x6, [x3, #(1 * 8)]
- str x5, [x3, #(0 * 8)]
-
- mrs x21, mdccint_el1
- str x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
+ str x21, [x4, #(15 * 8)]
+ str x20, [x4, #(14 * 8)]
+ str x19, [x4, #(13 * 8)]
+ str x18, [x4, #(12 * 8)]
+ str x17, [x4, #(11 * 8)]
+ str x16, [x4, #(10 * 8)]
+ str x15, [x4, #(9 * 8)]
+ str x14, [x4, #(8 * 8)]
+ str x13, [x4, #(7 * 8)]
+ str x12, [x4, #(6 * 8)]
+ str x11, [x4, #(5 * 8)]
+ str x10, [x4, #(4 * 8)]
+ str x9, [x4, #(3 * 8)]
+ str x8, [x4, #(2 * 8)]
+ str x7, [x4, #(1 * 8)]
+ str x6, [x4, #(0 * 8)]
.endm
.macro restore_sysregs
msr mdscr_el1, x25
.endm
-.macro restore_debug
- // x2: base address for cpu context
- // x3: tmp register
-
- mrs x26, id_aa64dfr0_el1
- ubfx x24, x26, #12, #4 // Extract BRPs
- ubfx x25, x26, #20, #4 // Extract WRPs
- mov w26, #15
- sub w24, w26, w24 // How many BPs to skip
- sub w25, w26, w25 // How many WPs to skip
-
- add x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
+.macro restore_debug type
+ // x4: pointer to register set
+ // x5: number of registers to skip
+ // x6..x22 trashed
- adr x26, 1f
- add x26, x26, x24, lsl #2
- br x26
-1:
- ldr x20, [x3, #(15 * 8)]
- ldr x19, [x3, #(14 * 8)]
- ldr x18, [x3, #(13 * 8)]
- ldr x17, [x3, #(12 * 8)]
- ldr x16, [x3, #(11 * 8)]
- ldr x15, [x3, #(10 * 8)]
- ldr x14, [x3, #(9 * 8)]
- ldr x13, [x3, #(8 * 8)]
- ldr x12, [x3, #(7 * 8)]
- ldr x11, [x3, #(6 * 8)]
- ldr x10, [x3, #(5 * 8)]
- ldr x9, [x3, #(4 * 8)]
- ldr x8, [x3, #(3 * 8)]
- ldr x7, [x3, #(2 * 8)]
- ldr x6, [x3, #(1 * 8)]
- ldr x5, [x3, #(0 * 8)]
-
- adr x26, 1f
- add x26, x26, x24, lsl #2
- br x26
+ adr x22, 1f
+ add x22, x22, x5, lsl #2
+ br x22
1:
- msr dbgbcr15_el1, x20
- msr dbgbcr14_el1, x19
- msr dbgbcr13_el1, x18
- msr dbgbcr12_el1, x17
- msr dbgbcr11_el1, x16
- msr dbgbcr10_el1, x15
- msr dbgbcr9_el1, x14
- msr dbgbcr8_el1, x13
- msr dbgbcr7_el1, x12
- msr dbgbcr6_el1, x11
- msr dbgbcr5_el1, x10
- msr dbgbcr4_el1, x9
- msr dbgbcr3_el1, x8
- msr dbgbcr2_el1, x7
- msr dbgbcr1_el1, x6
- msr dbgbcr0_el1, x5
-
- add x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
-
- adr x26, 1f
- add x26, x26, x24, lsl #2
- br x26
+ ldr x21, [x4, #(15 * 8)]
+ ldr x20, [x4, #(14 * 8)]
+ ldr x19, [x4, #(13 * 8)]
+ ldr x18, [x4, #(12 * 8)]
+ ldr x17, [x4, #(11 * 8)]
+ ldr x16, [x4, #(10 * 8)]
+ ldr x15, [x4, #(9 * 8)]
+ ldr x14, [x4, #(8 * 8)]
+ ldr x13, [x4, #(7 * 8)]
+ ldr x12, [x4, #(6 * 8)]
+ ldr x11, [x4, #(5 * 8)]
+ ldr x10, [x4, #(4 * 8)]
+ ldr x9, [x4, #(3 * 8)]
+ ldr x8, [x4, #(2 * 8)]
+ ldr x7, [x4, #(1 * 8)]
+ ldr x6, [x4, #(0 * 8)]
+
+ adr x22, 1f
+ add x22, x22, x5, lsl #2
+ br x22
1:
- ldr x20, [x3, #(15 * 8)]
- ldr x19, [x3, #(14 * 8)]
- ldr x18, [x3, #(13 * 8)]
- ldr x17, [x3, #(12 * 8)]
- ldr x16, [x3, #(11 * 8)]
- ldr x15, [x3, #(10 * 8)]
- ldr x14, [x3, #(9 * 8)]
- ldr x13, [x3, #(8 * 8)]
- ldr x12, [x3, #(7 * 8)]
- ldr x11, [x3, #(6 * 8)]
- ldr x10, [x3, #(5 * 8)]
- ldr x9, [x3, #(4 * 8)]
- ldr x8, [x3, #(3 * 8)]
- ldr x7, [x3, #(2 * 8)]
- ldr x6, [x3, #(1 * 8)]
- ldr x5, [x3, #(0 * 8)]
-
- adr x26, 1f
- add x26, x26, x24, lsl #2
- br x26
-1:
- msr dbgbvr15_el1, x20
- msr dbgbvr14_el1, x19
- msr dbgbvr13_el1, x18
- msr dbgbvr12_el1, x17
- msr dbgbvr11_el1, x16
- msr dbgbvr10_el1, x15
- msr dbgbvr9_el1, x14
- msr dbgbvr8_el1, x13
- msr dbgbvr7_el1, x12
- msr dbgbvr6_el1, x11
- msr dbgbvr5_el1, x10
- msr dbgbvr4_el1, x9
- msr dbgbvr3_el1, x8
- msr dbgbvr2_el1, x7
- msr dbgbvr1_el1, x6
- msr dbgbvr0_el1, x5
-
- add x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
-
- adr x26, 1f
- add x26, x26, x25, lsl #2
- br x26
-1:
- ldr x20, [x3, #(15 * 8)]
- ldr x19, [x3, #(14 * 8)]
- ldr x18, [x3, #(13 * 8)]
- ldr x17, [x3, #(12 * 8)]
- ldr x16, [x3, #(11 * 8)]
- ldr x15, [x3, #(10 * 8)]
- ldr x14, [x3, #(9 * 8)]
- ldr x13, [x3, #(8 * 8)]
- ldr x12, [x3, #(7 * 8)]
- ldr x11, [x3, #(6 * 8)]
- ldr x10, [x3, #(5 * 8)]
- ldr x9, [x3, #(4 * 8)]
- ldr x8, [x3, #(3 * 8)]
- ldr x7, [x3, #(2 * 8)]
- ldr x6, [x3, #(1 * 8)]
- ldr x5, [x3, #(0 * 8)]
-
- adr x26, 1f
- add x26, x26, x25, lsl #2
- br x26
-1:
- msr dbgwcr15_el1, x20
- msr dbgwcr14_el1, x19
- msr dbgwcr13_el1, x18
- msr dbgwcr12_el1, x17
- msr dbgwcr11_el1, x16
- msr dbgwcr10_el1, x15
- msr dbgwcr9_el1, x14
- msr dbgwcr8_el1, x13
- msr dbgwcr7_el1, x12
- msr dbgwcr6_el1, x11
- msr dbgwcr5_el1, x10
- msr dbgwcr4_el1, x9
- msr dbgwcr3_el1, x8
- msr dbgwcr2_el1, x7
- msr dbgwcr1_el1, x6
- msr dbgwcr0_el1, x5
-
- add x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
-
- adr x26, 1f
- add x26, x26, x25, lsl #2
- br x26
-1:
- ldr x20, [x3, #(15 * 8)]
- ldr x19, [x3, #(14 * 8)]
- ldr x18, [x3, #(13 * 8)]
- ldr x17, [x3, #(12 * 8)]
- ldr x16, [x3, #(11 * 8)]
- ldr x15, [x3, #(10 * 8)]
- ldr x14, [x3, #(9 * 8)]
- ldr x13, [x3, #(8 * 8)]
- ldr x12, [x3, #(7 * 8)]
- ldr x11, [x3, #(6 * 8)]
- ldr x10, [x3, #(5 * 8)]
- ldr x9, [x3, #(4 * 8)]
- ldr x8, [x3, #(3 * 8)]
- ldr x7, [x3, #(2 * 8)]
- ldr x6, [x3, #(1 * 8)]
- ldr x5, [x3, #(0 * 8)]
-
- adr x26, 1f
- add x26, x26, x25, lsl #2
- br x26
-1:
- msr dbgwvr15_el1, x20
- msr dbgwvr14_el1, x19
- msr dbgwvr13_el1, x18
- msr dbgwvr12_el1, x17
- msr dbgwvr11_el1, x16
- msr dbgwvr10_el1, x15
- msr dbgwvr9_el1, x14
- msr dbgwvr8_el1, x13
- msr dbgwvr7_el1, x12
- msr dbgwvr6_el1, x11
- msr dbgwvr5_el1, x10
- msr dbgwvr4_el1, x9
- msr dbgwvr3_el1, x8
- msr dbgwvr2_el1, x7
- msr dbgwvr1_el1, x6
- msr dbgwvr0_el1, x5
-
- ldr x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
- msr mdccint_el1, x21
+ msr \type\()15_el1, x21
+ msr \type\()14_el1, x20
+ msr \type\()13_el1, x19
+ msr \type\()12_el1, x18
+ msr \type\()11_el1, x17
+ msr \type\()10_el1, x16
+ msr \type\()9_el1, x15
+ msr \type\()8_el1, x14
+ msr \type\()7_el1, x13
+ msr \type\()6_el1, x12
+ msr \type\()5_el1, x11
+ msr \type\()4_el1, x10
+ msr \type\()3_el1, x9
+ msr \type\()2_el1, x8
+ msr \type\()1_el1, x7
+ msr \type\()0_el1, x6
.endm
.macro skip_32bit_state tmp, target
tbz \tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
.endm
+/*
+ * Branch to target if CPTR_EL2.TFP bit is set (VFP/SIMD trapping enabled)
+ */
+.macro skip_fpsimd_state tmp, target
+ mrs \tmp, cptr_el2
+ tbnz \tmp, #CPTR_EL2_TFP_SHIFT, \target
+.endm
+
.macro compute_debug_state target
// Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
// is set, we do a full save/restore cycle and disable trapping.
add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
mrs x4, dacr32_el2
mrs x5, ifsr32_el2
- mrs x6, fpexc32_el2
stp x4, x5, [x3]
- str x6, [x3, #16]
+ skip_fpsimd_state x8, 3f
+ mrs x6, fpexc32_el2
+ str x6, [x3, #16]
+3:
skip_debug_state x8, 2f
mrs x7, dbgvcr32_el2
str x7, [x3, #24]
add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
ldp x4, x5, [x3]
- ldr x6, [x3, #16]
msr dacr32_el2, x4
msr ifsr32_el2, x5
- msr fpexc32_el2, x6
skip_debug_state x8, 2f
ldr x7, [x3, #24]
.macro activate_traps
ldr x2, [x0, #VCPU_HCR_EL2]
+
+ /*
+	 * We are about to set CPTR_EL2.TFP to trap all floating point
+	 * register accesses to EL2. However, the ARM ARM clearly states that
+	 * traps are only taken to EL2 if the operation would not otherwise
+	 * trap to EL1. Therefore, for 32-bit guests, always set FPEXC.EN to
+	 * prevent traps to EL1 when setting the TFP bit.
+ */
+ tbnz x2, #HCR_RW_SHIFT, 99f // open code skip_32bit_state
+ mov x3, #(1 << 30)
+ msr fpexc32_el2, x3
+ isb
+99:
msr hcr_el2, x2
mov x2, #CPTR_EL2_TTA
+ orr x2, x2, #CPTR_EL2_TFP
msr cptr_el2, x2
mov x2, #(1 << 15) // Trap CP15 Cr=15
msr hstr_el2, x2
- mrs x2, mdcr_el2
- and x2, x2, #MDCR_EL2_HPMN_MASK
- orr x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
- orr x2, x2, #(MDCR_EL2_TDRA | MDCR_EL2_TDOSA)
-
- // Check for KVM_ARM64_DEBUG_DIRTY, and set debug to trap
- // if not dirty.
- ldr x3, [x0, #VCPU_DEBUG_FLAGS]
- tbnz x3, #KVM_ARM64_DEBUG_DIRTY_SHIFT, 1f
- orr x2, x2, #MDCR_EL2_TDA
-1:
+ // Monitor Debug Config - see kvm_arm_setup_debug()
+ ldr x2, [x0, #VCPU_MDCR_EL2]
msr mdcr_el2, x2
.endm
.macro deactivate_traps
mov x2, #HCR_RW
msr hcr_el2, x2
- msr cptr_el2, xzr
msr hstr_el2, xzr
mrs x2, mdcr_el2
restore_sysregs
ret
+/* Save debug state */
__save_debug:
- save_debug
+ // x2: ptr to CPU context
+ // x3: ptr to debug reg struct
+ // x4/x5/x6-22/x24-26: trashed
+
+ mrs x26, id_aa64dfr0_el1
+ ubfx x24, x26, #12, #4 // Extract BRPs
+ ubfx x25, x26, #20, #4 // Extract WRPs
+ mov w26, #15
+ sub w24, w26, w24 // How many BPs to skip
+ sub w25, w26, w25 // How many WPs to skip
+
+ mov x5, x24
+ add x4, x3, #DEBUG_BCR
+ save_debug dbgbcr
+ add x4, x3, #DEBUG_BVR
+ save_debug dbgbvr
+
+ mov x5, x25
+ add x4, x3, #DEBUG_WCR
+ save_debug dbgwcr
+ add x4, x3, #DEBUG_WVR
+ save_debug dbgwvr
+
+ mrs x21, mdccint_el1
+ str x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
ret
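For reference, a minimal C sketch of the BRP/WRP extraction performed by the ubfx instructions above, assuming the architectural field layout (ID_AA64DFR0_EL1.BRPs in bits [15:12], WRPs in bits [23:20], each holding one less than the implemented count); the helper names here are invented:

	static inline int num_brps(unsigned long long dfr0)
	{
		/* BRPs field: implemented breakpoints - 1 */
		return (int)((dfr0 >> 12) & 0xf) + 1;
	}

	static inline int num_wrps(unsigned long long dfr0)
	{
		/* WRPs field: implemented watchpoints - 1 */
		return (int)((dfr0 >> 20) & 0xf) + 1;
	}

The assembly keeps the raw field value (count - 1) and subtracts it from 15, giving the number of the 16 unrolled save/restore slots to skip.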
+/* Restore debug state */
__restore_debug:
- restore_debug
+ // x2: ptr to CPU context
+ // x3: ptr to debug reg struct
+ // x4/x5/x6-22/x24-26: trashed
+
+ mrs x26, id_aa64dfr0_el1
+ ubfx x24, x26, #12, #4 // Extract BRPs
+ ubfx x25, x26, #20, #4 // Extract WRPs
+ mov w26, #15
+ sub w24, w26, w24 // How many BPs to skip
+ sub w25, w26, w25 // How many WPs to skip
+
+ mov x5, x24
+ add x4, x3, #DEBUG_BCR
+ restore_debug dbgbcr
+ add x4, x3, #DEBUG_BVR
+ restore_debug dbgbvr
+
+ mov x5, x25
+ add x4, x3, #DEBUG_WCR
+ restore_debug dbgwcr
+ add x4, x3, #DEBUG_WVR
+ restore_debug dbgwvr
+
+ ldr x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
+ msr mdccint_el1, x21
+
ret
__save_fpsimd:
+ skip_fpsimd_state x3, 1f
save_fpsimd
- ret
+1: ret
__restore_fpsimd:
+ skip_fpsimd_state x3, 1f
restore_fpsimd
- ret
+1: ret
+
+switch_to_guest_fpsimd:
+ push x4, lr
+
+ mrs x2, cptr_el2
+ bic x2, x2, #CPTR_EL2_TFP
+ msr cptr_el2, x2
+ isb
+
+ mrs x0, tpidr_el2
+
+ ldr x2, [x0, #VCPU_HOST_CONTEXT]
+ kern_hyp_va x2
+ bl __save_fpsimd
+
+ add x2, x0, #VCPU_CONTEXT
+ bl __restore_fpsimd
+
+ skip_32bit_state x3, 1f
+ ldr x4, [x2, #CPU_SYSREG_OFFSET(FPEXC32_EL2)]
+ msr fpexc32_el2, x4
+1:
+ pop x4, lr
+ pop x2, x3
+ pop x0, x1
+
+ eret
/*
* u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
kern_hyp_va x2
save_host_regs
- bl __save_fpsimd
bl __save_sysregs
compute_debug_state 1f
+ add x3, x0, #VCPU_HOST_DEBUG_STATE
bl __save_debug
1:
activate_traps
add x2, x0, #VCPU_CONTEXT
bl __restore_sysregs
- bl __restore_fpsimd
skip_debug_state x3, 1f
+ ldr x3, [x0, #VCPU_DEBUG_PTR]
+ kern_hyp_va x3
bl __restore_debug
1:
restore_guest_32bit_state
bl __save_sysregs
skip_debug_state x3, 1f
+ ldr x3, [x0, #VCPU_DEBUG_PTR]
+ kern_hyp_va x3
bl __save_debug
1:
save_guest_32bit_state
bl __restore_sysregs
bl __restore_fpsimd
+ /* Clear FPSIMD and Trace trapping */
+ msr cptr_el2, xzr
skip_debug_state x3, 1f
// Clear the dirty flag for the next run, as all the state has
// already been saved. Note that we nuke the whole 64bit word.
// If we ever add more flags, we'll have to be more careful...
str xzr, [x0, #VCPU_DEBUG_FLAGS]
+ add x3, x0, #VCPU_HOST_DEBUG_STATE
bl __restore_debug
1:
restore_host_regs
* x1: ESR
* x2: ESR_EC
*/
+
+ /* Guest accessed VFP/SIMD registers, save host, restore Guest */
+ cmp x2, #ESR_ELx_EC_FP_ASIMD
+ b.eq switch_to_guest_fpsimd
+
cmp x2, #ESR_ELx_EC_DABT_LOW
mov x0, #ESR_ELx_EC_IABT_LOW
ccmp x2, x0, #4, ne
ventry el1_error_invalid // Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)
+
+ENTRY(__kvm_get_mdcr_el2)
+ mrs x0, mdcr_el2
+ ret
+ENDPROC(__kvm_get_mdcr_el2)
+
.popsection
#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
+#include <linux/hw_breakpoint.h>
#include <kvm/arm_arch_timer.h>
return !!(pfr0 & 0x20);
}
+/**
+ * kvm_arch_dev_ioctl_check_extension
+ *
+ * We currently assume that the number of HW registers is uniform
+ * across all CPUs (see cpuinfo_sanity_check).
+ */
int kvm_arch_dev_ioctl_check_extension(long ext)
{
int r;
case KVM_CAP_ARM_EL1_32BIT:
r = cpu_has_32bit_el1();
break;
+ case KVM_CAP_GUEST_DEBUG_HW_BPS:
+ r = get_num_brps();
+ break;
+ case KVM_CAP_GUEST_DEBUG_HW_WPS:
+ r = get_num_wrps();
+ break;
+ case KVM_CAP_SET_GUEST_DEBUG:
+ r = 1;
+ break;
default:
r = 0;
}
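Userspace can size its debug register arrays by probing these capabilities; a hedged sketch using the standard KVM_CHECK_EXTENSION ioctl (error handling omitted):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);
		int bps = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_GUEST_DEBUG_HW_BPS);
		int wps = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_GUEST_DEBUG_HW_WPS);

		printf("hw breakpoints: %d, hw watchpoints: %d\n", bps, wps);
		return 0;
	}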
kvm_reset_sys_regs(vcpu);
/* Reset timer */
- kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
-
- return 0;
+ return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
}
#include "sys_regs.h"
+#include "trace.h"
+
/*
 * All of this file is extremely similar to the ARM coproc.c, but the
* types are different. My gut feeling is that it should be pretty
*vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
}
+ trace_trap_reg(__func__, r->reg, p->is_write, *vcpu_reg(vcpu, p->Rt));
+
+ return true;
+}
+
+/*
+ * reg_to_dbg/dbg_to_reg
+ *
+ * A 32 bit write to a debug register leaves the top bits alone
+ * A 32 bit read from a debug register only returns the bottom bits
+ *
+ * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
+ * hyp.S code switches between host and guest values in future.
+ */
+static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ u64 *dbg_reg)
+{
+ u64 val = *vcpu_reg(vcpu, p->Rt);
+
+ if (p->is_32bit) {
+ val &= 0xffffffffUL;
+ val |= ((*dbg_reg >> 32) << 32);
+ }
+
+ *dbg_reg = val;
+ vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+}
+
+static inline void dbg_to_reg(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ u64 *dbg_reg)
+{
+ u64 val = *dbg_reg;
+
+ if (p->is_32bit)
+ val &= 0xffffffffUL;
+
+ *vcpu_reg(vcpu, p->Rt) = val;
+}
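A worked instance of the masking above, with invented values:

	#include <stdint.h>

	static void demo_32bit_debug_write(void)
	{
		uint64_t dbg_reg = 0x1122334455667788ULL;
		uint64_t val = 0xdeadbeefULL;	/* 32-bit guest write */

		val &= 0xffffffffUL;
		val |= (dbg_reg >> 32) << 32;	/* top half preserved */
		/* val == 0x11223344deadbeefULL; a 32-bit read would
		 * return only the bottom half, 0x55667788 */
		(void)val;
	}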
+
+static inline bool trap_bvr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+{
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+
+ if (p->is_write)
+ reg_to_dbg(vcpu, p, dbg_reg);
+ else
+ dbg_to_reg(vcpu, p, dbg_reg);
+
+ trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+
+ return true;
+}
+
+static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+
+	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+
+ if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static inline void reset_bvr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
+}
+
+static inline bool trap_bcr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+{
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+
+ if (p->is_write)
+ reg_to_dbg(vcpu, p, dbg_reg);
+ else
+ dbg_to_reg(vcpu, p, dbg_reg);
+
+ trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+
+ return true;
+}
+
+static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+
+	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+		return -EFAULT;
+ return 0;
+}
+
+static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+
+ if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static inline void reset_bcr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
+}
+
+static inline bool trap_wvr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+{
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+
+ if (p->is_write)
+ reg_to_dbg(vcpu, p, dbg_reg);
+ else
+ dbg_to_reg(vcpu, p, dbg_reg);
+
+ trace_trap_reg(__func__, rd->reg, p->is_write,
+ vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
+
return true;
}
+static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+
+	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+
+ if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static inline void reset_wvr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
+}
+
+static inline bool trap_wcr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+{
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+
+ if (p->is_write)
+ reg_to_dbg(vcpu, p, dbg_reg);
+ else
+ dbg_to_reg(vcpu, p, dbg_reg);
+
+ trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+
+ return true;
+}
+
+static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+
+	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+
+ if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static inline void reset_wcr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
+}
+
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 amair;
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
/* DBGBVRn_EL1 */ \
{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100), \
- trap_debug_regs, reset_val, (DBGBVR0_EL1 + (n)), 0 }, \
+ trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr }, \
/* DBGBCRn_EL1 */ \
{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101), \
- trap_debug_regs, reset_val, (DBGBCR0_EL1 + (n)), 0 }, \
+ trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr }, \
/* DBGWVRn_EL1 */ \
{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110), \
- trap_debug_regs, reset_val, (DBGWVR0_EL1 + (n)), 0 }, \
+ trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr }, \
/* DBGWCRn_EL1 */ \
{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111), \
- trap_debug_regs, reset_val, (DBGWCR0_EL1 + (n)), 0 }
+ trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }
/*
* Architected system registers.
return true;
}
-#define DBG_BCR_BVR_WCR_WVR(n) \
- /* DBGBVRn */ \
- { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_debug32, \
- NULL, (cp14_DBGBVR0 + (n) * 2) }, \
- /* DBGBCRn */ \
- { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_debug32, \
- NULL, (cp14_DBGBCR0 + (n) * 2) }, \
- /* DBGWVRn */ \
- { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_debug32, \
- NULL, (cp14_DBGWVR0 + (n) * 2) }, \
- /* DBGWCRn */ \
- { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_debug32, \
- NULL, (cp14_DBGWCR0 + (n) * 2) }
-
-#define DBGBXVR(n) \
- { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_debug32, \
- NULL, cp14_DBGBXVR0 + n * 2 }
+/* AArch32 debug register mappings
+ *
+ * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
+ * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
+ *
+ * All control registers and watchpoint value registers are mapped to
+ * the lower 32 bits of their AArch64 equivalents. We share the trap
+ * handlers with the above AArch64 code which checks what mode the
+ * system is in.
+ */
+
+static inline bool trap_xvr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+{
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+
+ if (p->is_write) {
+ u64 val = *dbg_reg;
+
+ val &= 0xffffffffUL;
+ val |= *vcpu_reg(vcpu, p->Rt) << 32;
+ *dbg_reg = val;
+
+ vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+ } else {
+ *vcpu_reg(vcpu, p->Rt) = *dbg_reg >> 32;
+ }
+
+ trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+
+ return true;
+}
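Concretely, with invented values, a DBGBXVRn write only touches the top half of the corresponding DBGBVRn_EL1:

	#include <stdint.h>

	static void demo_xvr_write(void)
	{
		uint64_t bvr = 0x0000000080001000ULL;	/* current DBGBVR1_EL1 */
		uint64_t rt = 0xffULL;			/* guest's Rt value */

		bvr &= 0xffffffffUL;
		bvr |= rt << 32;	/* bvr == 0x000000ff80001000ULL */
		/* a DBGBXVR1 read returns bvr >> 32 == 0xff */
		(void)bvr;
	}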
+
+#define DBG_BCR_BVR_WCR_WVR(n) \
+ /* DBGBVRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
+ /* DBGBCRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n }, \
+ /* DBGWVRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n }, \
+ /* DBGWCRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
+
+#define DBGBXVR(n) \
+ { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
/*
* Trapped cp14 registers. We generally ignore most of the external
* debug, on the principle that they don't really make sense to a
- * guest. Revisit this one day, whould this principle change.
+ * guest. Revisit this one day, should this principle change.
*/
static const struct sys_reg_desc cp14_regs[] = {
/* DBGIDR */
struct sys_reg_params params;
unsigned long esr = kvm_vcpu_get_hsr(vcpu);
+ trace_kvm_handle_sys_reg(esr);
+
params.is_aarch32 = false;
params.is_32bit = false;
params.Op0 = (esr >> 20) & 3;
if (!r)
return get_invariant_sys_reg(reg->id, uaddr);
+ if (r->get_user)
+ return (r->get_user)(vcpu, r, reg, uaddr);
+
return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}
if (!r)
return set_invariant_sys_reg(reg->id, uaddr);
+ if (r->set_user)
+ return (r->set_user)(vcpu, r, reg, uaddr);
+
return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}
/* Value (usually reset value) */
u64 val;
+
+ /* Custom get/set_user functions, fallback to generic if NULL */
+ int (*get_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr);
+ int (*set_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr);
};
static inline void print_sys_reg_instr(const struct sys_reg_params *p)
&genericv8_target_table);
kvm_register_target_sys_reg_table(KVM_ARM_TARGET_XGENE_POTENZA,
&genericv8_target_table);
+ kvm_register_target_sys_reg_table(KVM_ARM_TARGET_GENERIC_V8,
+ &genericv8_target_table);
return 0;
}
__entry->vcpu_pc, __entry->r0, __entry->imm)
);
+TRACE_EVENT(kvm_arm_setup_debug,
+ TP_PROTO(struct kvm_vcpu *vcpu, __u32 guest_debug),
+ TP_ARGS(vcpu, guest_debug),
+
+ TP_STRUCT__entry(
+ __field(struct kvm_vcpu *, vcpu)
+ __field(__u32, guest_debug)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu = vcpu;
+ __entry->guest_debug = guest_debug;
+ ),
+
+ TP_printk("vcpu: %p, flags: 0x%08x", __entry->vcpu, __entry->guest_debug)
+);
+
+TRACE_EVENT(kvm_arm_clear_debug,
+ TP_PROTO(__u32 guest_debug),
+ TP_ARGS(guest_debug),
+
+ TP_STRUCT__entry(
+ __field(__u32, guest_debug)
+ ),
+
+ TP_fast_assign(
+ __entry->guest_debug = guest_debug;
+ ),
+
+ TP_printk("flags: 0x%08x", __entry->guest_debug)
+);
+
+TRACE_EVENT(kvm_arm_set_dreg32,
+ TP_PROTO(const char *name, __u32 value),
+ TP_ARGS(name, value),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(__u32, value)
+ ),
+
+ TP_fast_assign(
+ __entry->name = name;
+ __entry->value = value;
+ ),
+
+ TP_printk("%s: 0x%08x", __entry->name, __entry->value)
+);
+
+TRACE_EVENT(kvm_arm_set_regset,
+ TP_PROTO(const char *type, int len, __u64 *control, __u64 *value),
+ TP_ARGS(type, len, control, value),
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(int, len)
+ __array(u64, ctrls, 16)
+ __array(u64, values, 16)
+ ),
+ TP_fast_assign(
+ __entry->name = type;
+ __entry->len = len;
+ memcpy(__entry->ctrls, control, len << 3);
+ memcpy(__entry->values, value, len << 3);
+ ),
+ TP_printk("%d %s CTRL:%s VALUE:%s", __entry->len, __entry->name,
+ __print_array(__entry->ctrls, __entry->len, sizeof(__u64)),
+ __print_array(__entry->values, __entry->len, sizeof(__u64)))
+);
+
+TRACE_EVENT(trap_reg,
+ TP_PROTO(const char *fn, int reg, bool is_write, u64 write_value),
+ TP_ARGS(fn, reg, is_write, write_value),
+
+ TP_STRUCT__entry(
+ __field(const char *, fn)
+ __field(int, reg)
+ __field(bool, is_write)
+ __field(u64, write_value)
+ ),
+
+ TP_fast_assign(
+ __entry->fn = fn;
+ __entry->reg = reg;
+ __entry->is_write = is_write;
+ __entry->write_value = write_value;
+ ),
+
+	TP_printk("%s %s reg %d (0x%08llx)", __entry->fn, __entry->is_write ? "write to" : "read from", __entry->reg, __entry->write_value)
+);
+
+TRACE_EVENT(kvm_handle_sys_reg,
+ TP_PROTO(unsigned long hsr),
+ TP_ARGS(hsr),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, hsr)
+ ),
+
+ TP_fast_assign(
+ __entry->hsr = hsr;
+ ),
+
+ TP_printk("HSR 0x%08lx", __entry->hsr)
+);
+
+TRACE_EVENT(kvm_set_guest_debug,
+ TP_PROTO(struct kvm_vcpu *vcpu, __u32 guest_debug),
+ TP_ARGS(vcpu, guest_debug),
+
+ TP_STRUCT__entry(
+ __field(struct kvm_vcpu *, vcpu)
+ __field(__u32, guest_debug)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu = vcpu;
+ __entry->guest_debug = guest_debug;
+ ),
+
+ TP_printk("vcpu: %p, flags: 0x%08x", __entry->vcpu, __entry->guest_debug)
+);
+
+
#endif /* _TRACE_ARM64_KVM_H */
#undef TRACE_INCLUDE_PATH
#ifndef _ASM_SIGNAL_H
#define _ASM_SIGNAL_H
-#include <uapi/asm/registers.h>
-
extern unsigned long __rt_sigtramp_template[2];
void do_signal(struct pt_regs *regs);
return 0;
}
-/*
- * Sets the mode (periodic, shutdown, oneshot, etc) of a timer.
- */
-static void set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
- switch (mode) {
- case CLOCK_EVT_MODE_SHUTDOWN:
- /* XXX implement me */
- default:
- break;
- }
-}
-
#ifdef CONFIG_SMP
/* Broadcast mechanism */
static void broadcast(const struct cpumask *mask)
}
#endif
+/* XXX Implement set_state_shutdown() */
static struct clock_event_device hexagon_clockevent_dev = {
.name = "clockevent",
.features = CLOCK_EVT_FEAT_ONESHOT,
.rating = 400,
.irq = RTOS_TIMER_INT,
.set_next_event = set_next_event,
- .set_mode = set_mode,
#ifdef CONFIG_SMP
.broadcast = broadcast,
#endif
dummy_clock_dev->features = CLOCK_EVT_FEAT_DUMMY;
dummy_clock_dev->cpumask = cpumask_of(cpu);
- dummy_clock_dev->mode = CLOCK_EVT_MODE_UNUSED;
clockevents_register_device(dummy_clock_dev);
}
for_each_active_irq(i) {
struct irq_data *data = irq_get_irq_data(i);
+ struct cpumask *mask;
unsigned int newcpu;
if (irqd_is_per_cpu(data))
continue;
- if (!cpumask_test_cpu(cpu, data->affinity))
+ mask = irq_data_get_affinity_mask(data);
+ if (!cpumask_test_cpu(cpu, mask))
continue;
- newcpu = cpumask_any_and(data->affinity, cpu_online_mask);
+ newcpu = cpumask_any_and(mask, cpu_online_mask);
if (newcpu >= nr_cpu_ids) {
pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
i, cpu);
- cpumask_setall(data->affinity);
+ cpumask_setall(mask);
}
- irq_set_affinity(i, data->affinity);
+ irq_set_affinity(i, mask);
}
}
#endif /* CONFIG_HOTPLUG_CPU */
#ifndef _UAPI_ASM_MICROBLAZE_ELF_H
#define _UAPI_ASM_MICROBLAZE_ELF_H
+#include <linux/elf-em.h>
+
/*
* Note there is no "official" ELF designation for Microblaze.
* I've snaffled the value from the microblaze binutils source code
* /binutils/microblaze/include/elf/microblaze.h
*/
-#define EM_MICROBLAZE 189
#define EM_MICROBLAZE_OLD 0xbaab
#define ELF_ARCH EM_MICROBLAZE
--- /dev/null
+/*
+ * Copyright (C) 2015 Altera Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/dts-v1/;
+
+/ {
+ model = "Altera NiosII Max10";
+ compatible = "altr,niosii-max10";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu: cpu@0 {
+ device_type = "cpu";
+ compatible = "altr,nios2-1.1";
+ reg = <0x00000000>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ altr,exception-addr = <0xc8000120>;
+ altr,fast-tlb-miss-addr = <0xc0000100>;
+ altr,has-div = <1>;
+ altr,has-initda = <1>;
+ altr,has-mmu = <1>;
+ altr,has-mul = <1>;
+ altr,implementation = "fast";
+ altr,pid-num-bits = <8>;
+ altr,reset-addr = <0xd4000000>;
+ altr,tlb-num-entries = <256>;
+ altr,tlb-num-ways = <16>;
+ altr,tlb-ptr-sz = <8>;
+ clock-frequency = <75000000>;
+ dcache-line-size = <32>;
+ dcache-size = <32768>;
+ icache-line-size = <32>;
+ icache-size = <32768>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x08000000 0x08000000>,
+ <0x00000000 0x00000400>;
+ };
+
+ sopc0: sopc@0 {
+ device_type = "soc";
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "altr,avalon", "simple-bus";
+ bus-frequency = <75000000>;
+
+ jtag_uart: serial@18001530 {
+ compatible = "altr,juart-1.0";
+ reg = <0x18001530 0x00000008>;
+ interrupt-parent = <&cpu>;
+ interrupts = <7>;
+ };
+
+ a_16550_uart_0: serial@18001600 {
+ compatible = "altr,16550-FIFO32", "ns16550a";
+ reg = <0x18001600 0x00000200>;
+ interrupt-parent = <&cpu>;
+ interrupts = <1>;
+ auto-flow-control = <1>;
+ clock-frequency = <50000000>;
+ fifo-size = <32>;
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ };
+
+ sysid: sysid@18001528 {
+ compatible = "altr,sysid-1.0";
+ reg = <0x18001528 0x00000008>;
+ id = <4207856382>;
+ timestamp = <1431309290>;
+ };
+
+ rgmii_0_eth_tse_0: ethernet@400 {
+ compatible = "altr,tse-msgdma-1.0", "altr,tse-1.0";
+ reg = <0x00000400 0x00000400>,
+ <0x00000820 0x00000020>,
+ <0x00000800 0x00000020>,
+ <0x000008c0 0x00000008>,
+ <0x00000840 0x00000020>,
+ <0x00000860 0x00000020>;
+ reg-names = "control_port", "rx_csr", "rx_desc", "rx_resp", "tx_csr", "tx_desc";
+ interrupt-parent = <&cpu>;
+ interrupts = <2 3>;
+ interrupt-names = "rx_irq", "tx_irq";
+ rx-fifo-depth = <8192>;
+ tx-fifo-depth = <8192>;
+ address-bits = <48>;
+ max-frame-size = <1518>;
+ local-mac-address = [00 00 00 00 00 00];
+ altr,has-supplementary-unicast;
+ altr,enable-sup-addr = <1>;
+ altr,has-hash-multicast-filter;
+ altr,enable-hash = <1>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&phy0>;
+ rgmii_0_eth_tse_0_mdio: mdio {
+ compatible = "altr,tse-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ device_type = "ethernet-phy";
+ };
+ };
+ };
+
+ enet_pll: clock@0 {
+ compatible = "altr,pll-1.0";
+ #clock-cells = <1>;
+
+ enet_pll_c0: enet_pll_c0 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <125000000>;
+ clock-output-names = "enet_pll-c0";
+ };
+
+ enet_pll_c1: enet_pll_c1 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+ clock-output-names = "enet_pll-c1";
+ };
+
+ enet_pll_c2: enet_pll_c2 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <2500000>;
+ clock-output-names = "enet_pll-c2";
+ };
+ };
+
+ sys_pll: clock@1 {
+ compatible = "altr,pll-1.0";
+ #clock-cells = <1>;
+
+ sys_pll_c0: sys_pll_c0 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ clock-output-names = "sys_pll-c0";
+ };
+
+ sys_pll_c1: sys_pll_c1 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ clock-output-names = "sys_pll-c1";
+ };
+
+ sys_pll_c2: sys_pll_c2 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <75000000>;
+ clock-output-names = "sys_pll-c2";
+ };
+ };
+
+ sys_clk_timer: timer@18001440 {
+ compatible = "altr,timer-1.0";
+ reg = <0x18001440 0x00000020>;
+ interrupt-parent = <&cpu>;
+ interrupts = <0>;
+ clock-frequency = <75000000>;
+ };
+
+ led_pio: gpio@180014d0 {
+ compatible = "altr,pio-1.0";
+ reg = <0x180014d0 0x00000010>;
+ altr,gpio-bank-width = <4>;
+ resetvalue = <15>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ };
+
+ button_pio: gpio@180014c0 {
+ compatible = "altr,pio-1.0";
+ reg = <0x180014c0 0x00000010>;
+ interrupt-parent = <&cpu>;
+ interrupts = <6>;
+ altr,gpio-bank-width = <3>;
+ altr,interrupt-type = <2>;
+ edge_type = <1>;
+ level_trigger = <0>;
+ resetvalue = <0>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ };
+
+ sys_clk_timer_1: timer@880 {
+ compatible = "altr,timer-1.0";
+ reg = <0x00000880 0x00000020>;
+ interrupt-parent = <&cpu>;
+ interrupts = <5>;
+ clock-frequency = <75000000>;
+ };
+
+ fpga_leds: leds {
+ compatible = "gpio-leds";
+
+ led_fpga0: fpga0 {
+ label = "fpga_led0";
+ gpios = <&led_pio 0 1>;
+ };
+
+ led_fpga1: fpga1 {
+ label = "fpga_led1";
+ gpios = <&led_pio 1 1>;
+ };
+
+ led_fpga2: fpga2 {
+ label = "fpga_led2";
+ gpios = <&led_pio 2 1>;
+ };
+
+ led_fpga3: fpga3 {
+ label = "fpga_led3";
+ gpios = <&led_pio 3 1>;
+ };
+ };
+ };
+
+ chosen {
+ bootargs = "debug console=ttyS0,115200";
+ };
+};
--- /dev/null
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_SYSCTL_SYSCALL=y
+# CONFIG_ELF_CORE is not set
+# CONFIG_EPOLL is not set
+# CONFIG_SIGNALFD is not set
+# CONFIG_TIMERFD is not set
+# CONFIG_EVENTFD is not set
+# CONFIG_SHMEM is not set
+# CONFIG_AIO is not set
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_NIOS2_MEM_BASE=0x8000000
+CONFIG_NIOS2_HW_MUL_SUPPORT=y
+CONFIG_NIOS2_HW_DIV_SUPPORT=y
+CONFIG_CUSTOM_CACHE_SETTINGS=y
+CONFIG_NIOS2_DCACHE_SIZE=0x8000
+CONFIG_NIOS2_ICACHE_SIZE=0x8000
+# CONFIG_NIOS2_CMDLINE_IGNORE_DTB is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_FW_LOADER is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_NETDEVICES=y
+CONFIG_ALTERA_TSE=y
+CONFIG_MARVELL_PHY=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_ALTERA_JTAGUART=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_ALTERA=y
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+CONFIG_JFFS2_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_ROOT_NFS=y
+CONFIG_SUNRPC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
#define INST_STW 0x15
#define INST_LDW 0x17
-static unsigned long ma_user, ma_kern, ma_skipped, ma_half, ma_word;
-
static unsigned int ma_usermode;
#define UM_WARN 0x01
#define UM_FIXUP 0x02
static inline u32 get_reg_val(struct pt_regs *fp, int reg)
{
u8 *p = ((u8 *)fp) + reg_offsets[reg];
-
return *(u32 *)p;
}
u32 isn, addr, val;
int in_kernel;
u8 a, b, d0, d1, d2, d3;
- u16 imm16;
+ s16 imm16;
unsigned int fault;
/* back up one instruction */
fp->ea -= 4;
if (fixup_exception(fp)) {
- ma_skipped++;
return;
}
fault |= __get_user(d1, (u8 *)(addr+1));
val = (d1 << 8) | d0;
put_reg_val(fp, b, val);
- ma_half++;
break;
case INST_STH:
val = get_reg_val(fp, b);
d1 = val >> 8;
d0 = val >> 0;
-
- pr_debug("sth: ra=%d (%08x) rb=%d (%08x), imm16 %04x addr %08x val %08x\n",
- a, get_reg_val(fp, a),
- b, get_reg_val(fp, b),
- imm16, addr, val);
-
if (in_kernel) {
*(u8 *)(addr+0) = d0;
*(u8 *)(addr+1) = d1;
fault |= __put_user(d0, (u8 *)(addr+0));
fault |= __put_user(d1, (u8 *)(addr+1));
}
- ma_half++;
break;
case INST_LDH:
fault |= __get_user(d0, (u8 *)(addr+0));
fault |= __get_user(d1, (u8 *)(addr+1));
val = (short)((d1 << 8) | d0);
put_reg_val(fp, b, val);
- ma_half++;
break;
case INST_STW:
val = get_reg_val(fp, b);
fault |= __put_user(d2, (u8 *)(addr+2));
fault |= __put_user(d3, (u8 *)(addr+3));
}
- ma_word++;
break;
case INST_LDW:
fault |= __get_user(d0, (u8 *)(addr+0));
fault |= __get_user(d3, (u8 *)(addr+3));
val = (d3 << 24) | (d2 << 16) | (d1 << 8) | d0;
put_reg_val(fp, b, val);
- ma_word++;
break;
}
}
* note exception and skip bad instruction (return)
*/
if (in_kernel) {
- ma_kern++;
fp->ea += 4;
if (ma_usermode & KM_WARN) {
return;
}
- ma_user++;
-
/*
* user mode -
* possibly warn,
}
static void nios2_timer_config(struct nios2_timer *timer, unsigned long period,
- enum clock_event_mode mode)
+ bool periodic)
{
u16 ctrl;
timer_writew(timer, period >> 16, ALTERA_TIMER_PERIODH_REG);
ctrl |= ALTERA_TIMER_CONTROL_START_MSK | ALTERA_TIMER_CONTROL_ITO_MSK;
- if (mode == CLOCK_EVT_MODE_PERIODIC)
+ if (periodic)
ctrl |= ALTERA_TIMER_CONTROL_CONT_MSK;
else
ctrl &= ~ALTERA_TIMER_CONTROL_CONT_MSK;
{
struct nios2_clockevent_dev *nios2_ced = to_nios2_clkevent(evt);
- nios2_timer_config(&nios2_ced->timer, delta, evt->mode);
+ nios2_timer_config(&nios2_ced->timer, delta, false);
return 0;
}
-static void nios2_timer_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int nios2_timer_shutdown(struct clock_event_device *evt)
+{
+ struct nios2_clockevent_dev *nios2_ced = to_nios2_clkevent(evt);
+ struct nios2_timer *timer = &nios2_ced->timer;
+
+ nios2_timer_stop(timer);
+ return 0;
+}
+
+static int nios2_timer_set_periodic(struct clock_event_device *evt)
{
unsigned long period;
struct nios2_clockevent_dev *nios2_ced = to_nios2_clkevent(evt);
struct nios2_timer *timer = &nios2_ced->timer;
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- period = DIV_ROUND_UP(timer->freq, HZ);
- nios2_timer_config(timer, period, CLOCK_EVT_MODE_PERIODIC);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- nios2_timer_stop(timer);
- break;
- case CLOCK_EVT_MODE_RESUME:
- nios2_timer_start(timer);
- break;
- }
+ period = DIV_ROUND_UP(timer->freq, HZ);
+ nios2_timer_config(timer, period, true);
+ return 0;
+}
+
+static int nios2_timer_resume(struct clock_event_device *evt)
+{
+ struct nios2_clockevent_dev *nios2_ced = to_nios2_clkevent(evt);
+ struct nios2_timer *timer = &nios2_ced->timer;
+
+ nios2_timer_start(timer);
+ return 0;
}
irqreturn_t timer_interrupt(int irq, void *dev_id)
.rating = 250,
.shift = 32,
.set_next_event = nios2_timer_set_next_event,
- .set_mode = nios2_timer_set_mode,
+ .set_state_shutdown = nios2_timer_shutdown,
+ .set_state_periodic = nios2_timer_set_periodic,
+ .set_state_oneshot = nios2_timer_shutdown,
+ .tick_resume = nios2_timer_resume,
},
};
bool *writable);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
unsigned long *rmap, long pte_index, int realmode);
+extern void kvmppc_update_rmap_change(unsigned long *rmap, unsigned long psize);
extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
unsigned long pte_index);
void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
return vcpu->arch.cr;
}
-static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.xer = val;
}
-static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
return vcpu->arch.xer;
}
#define XICS_MFRR 0xc
#define XICS_IPI 2 /* interrupt source # for IPIs */
+/* Maximum number of threads per physical core */
+#define MAX_SMT_THREADS 8
+
+/* Maximum number of subcores per physical core */
+#define MAX_SUBCORES 4
+
#ifdef __ASSEMBLY__
#ifdef CONFIG_KVM_BOOK3S_HANDLER
#else /*__ASSEMBLY__ */
+struct kvmppc_vcore;
+
+/* Struct used for coordinating micro-threading (split-core) mode changes */
+struct kvm_split_mode {
+ unsigned long rpr;
+ unsigned long pmmar;
+ unsigned long ldbar;
+ u8 subcore_size;
+ u8 do_nap;
+ u8 napped[MAX_SMT_THREADS];
+ struct kvmppc_vcore *master_vcs[MAX_SUBCORES];
+};
+
/*
* This struct goes in the PACA on 64-bit processors. It is used
* to store host state that needs to be saved when we enter a guest
u64 host_spurr;
u64 host_dscr;
u64 dec_expires;
+ struct kvm_split_mode *kvm_split_mode;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
u64 cfar;
bool in_use;
ulong gpr[14];
u32 cr;
- u32 xer;
+ ulong xer;
ulong ctr;
ulong lr;
ulong pc;
return vcpu->arch.cr;
}
-static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.xer = val;
}
-static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
return vcpu->arch.xer;
}
*/
#define KVMPPC_RMAP_LOCK_BIT 63
#define KVMPPC_RMAP_RC_SHIFT 32
+#define KVMPPC_RMAP_CHG_SHIFT 48
#define KVMPPC_RMAP_REFERENCED (HPTE_R_R << KVMPPC_RMAP_RC_SHIFT)
#define KVMPPC_RMAP_CHANGED (HPTE_R_C << KVMPPC_RMAP_RC_SHIFT)
+#define KVMPPC_RMAP_CHG_ORDER (0x3ful << KVMPPC_RMAP_CHG_SHIFT)
#define KVMPPC_RMAP_PRESENT 0x100000000ul
#define KVMPPC_RMAP_INDEX 0xfffffffful
u16 last_cpu;
u8 vcore_state;
u8 in_guest;
+ struct kvmppc_vcore *master_vcore;
struct list_head runnable_threads;
+ struct list_head preempt_list;
spinlock_t lock;
wait_queue_head_t wq;
spinlock_t stoltb_lock; /* protects stolen_tb and preempt_tb */
#define VCORE_EXIT_MAP(vc) ((vc)->entry_exit_map >> 8)
#define VCORE_IS_EXITING(vc) (VCORE_EXIT_MAP(vc) != 0)
-/* Values for vcore_state */
+/* This bit is used when a vcore exit is triggered from outside the vcore */
+#define VCORE_EXIT_REQ 0x10000
+
+/*
+ * Values for vcore_state.
+ * Note that these are arranged such that lower values
+ * (< VCORE_SLEEPING) don't require stolen time accounting
+ * on load/unload, and higher values do.
+ */
#define VCORE_INACTIVE 0
-#define VCORE_SLEEPING 1
-#define VCORE_PREEMPT 2
-#define VCORE_RUNNING 3
-#define VCORE_EXITING 4
+#define VCORE_PREEMPT 1
+#define VCORE_PIGGYBACK 2
+#define VCORE_SLEEPING 3
+#define VCORE_RUNNING 4
+#define VCORE_EXITING 5
/*
* Struct used to manage memory for a virtual processor area
ulong ciabr;
ulong cfar;
ulong ppr;
- ulong pspb;
+ u32 pspb;
ulong fscr;
ulong shadow_fscr;
ulong ebbhr;
int trap;
int state;
int ptid;
+ int thread_cpu;
bool timer_running;
wait_queue_head_t cpu_run;
/* POWER8 Micro Partition Prefetch (MPP) parameters */
/* Address mask is common for LOGMPP instruction and MPPR SPR */
-#define PPC_MPPE_ADDRESS_MASK 0xffffffffc000
+#define PPC_MPPE_ADDRESS_MASK 0xffffffffc000ULL
/* Bits 60 and 61 of MPP SPR should be set to one of the following */
/* Aborting the fetch is indeed setting 00 in the table size bits */
DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
DEFINE(VCPU_HEIR, offsetof(struct kvm_vcpu, arch.emul_inst));
+ DEFINE(VCPU_CPU, offsetof(struct kvm_vcpu, cpu));
+ DEFINE(VCPU_THREAD_CPU, offsetof(struct kvm_vcpu, arch.thread_cpu));
#endif
#ifdef CONFIG_PPC_BOOK3S
DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
HSTATE_FIELD(HSTATE_DSCR, host_dscr);
HSTATE_FIELD(HSTATE_DABR, dabr);
HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
+ HSTATE_FIELD(HSTATE_SPLIT_MODE, kvm_split_mode);
DEFINE(IPI_PRIORITY, IPI_PRIORITY);
+ DEFINE(KVM_SPLIT_RPR, offsetof(struct kvm_split_mode, rpr));
+ DEFINE(KVM_SPLIT_PMMAR, offsetof(struct kvm_split_mode, pmmar));
+ DEFINE(KVM_SPLIT_LDBAR, offsetof(struct kvm_split_mode, ldbar));
+ DEFINE(KVM_SPLIT_SIZE, offsetof(struct kvm_split_mode, subcore_size));
+ DEFINE(KVM_SPLIT_DO_NAP, offsetof(struct kvm_split_mode, do_nap));
+ DEFINE(KVM_SPLIT_NAPPED, offsetof(struct kvm_split_mode, napped));
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#ifdef CONFIG_PPC_BOOK3S_64
If unsure, say N.
config KVM_BOOK3S_64_HV
- tristate "KVM support for POWER7 and PPC970 using hypervisor mode in host"
+ tristate "KVM for POWER7 and later using hypervisor mode in host"
depends on KVM_BOOK3S_64 && PPC_POWERNV
select KVM_BOOK3S_HV_POSSIBLE
select MMU_NOTIFIER
select CMA
---help---
Support running unmodified book3s_64 guest kernels in
- virtual machines on POWER7 and PPC970 processors that have
+ virtual machines on POWER7 and newer processors that have
hypervisor mode available to the host.
If you say Y here, KVM will use the hardware virtualization
guest operating systems will run at full hardware speed
using supervisor and user modes. However, this also means
that KVM is not usable under PowerVM (pHyp), is only usable
- on POWER7 (or later) processors and PPC970-family processors,
- and cannot emulate a different processor from the host processor.
+ on POWER7 or later processors, and cannot emulate a
+ different processor from the host processor.
If unsure, say N.
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
}
-int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
+static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
+ unsigned int priority)
{
int deliver = 1;
int vec = 0;
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
+#include "book3s.h"
/* #define DEBUG_MMU */
/* #define DEBUG_SR */
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#include "trace_pr.h"
+#include "book3s.h"
#define PTE_SIZE 12
/* Harvest R and C */
rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
+ if (rcbits & HPTE_R_C)
+ kvmppc_update_rmap_change(rmapp, psize);
if (rcbits & ~rev[i].guest_rpte) {
rev[i].guest_rpte = ptel | rcbits;
note_hpte_modification(kvm, &rev[i]);
retry:
lock_rmap(rmapp);
if (*rmapp & KVMPPC_RMAP_CHANGED) {
- *rmapp &= ~KVMPPC_RMAP_CHANGED;
+ long change_order = (*rmapp & KVMPPC_RMAP_CHG_ORDER)
+ >> KVMPPC_RMAP_CHG_SHIFT;
+ *rmapp &= ~(KVMPPC_RMAP_CHANGED | KVMPPC_RMAP_CHG_ORDER);
npages_dirty = 1;
+ if (change_order > PAGE_SHIFT)
+ npages_dirty = 1ul << (change_order - PAGE_SHIFT);
}
if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
unlock_rmap(rmapp);
#include <asm/reg.h>
#include <asm/switch_to.h>
#include <asm/time.h>
+#include "book3s.h"
#define OP_19_XOP_RFID 18
#define OP_19_XOP_RFI 50
#define MPP_BUFFER_ORDER 3
#endif
+static int dynamic_mt_modes = 6;
+module_param(dynamic_mt_modes, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)");
+static int target_smt_mode;
+module_param(target_smt_mode, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)");
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
- int cpu = vcpu->cpu;
+ int cpu;
wait_queue_head_t *wqp;
wqp = kvm_arch_vcpu_wq(vcpu);
++vcpu->stat.halt_wakeup;
}
- if (kvmppc_ipi_thread(cpu + vcpu->arch.ptid))
+ if (kvmppc_ipi_thread(vcpu->arch.thread_cpu))
return;
/* CPU points to the first thread of the core */
+ cpu = vcpu->cpu;
if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
smp_send_reschedule(cpu);
}
* they should never fail.)
*/
+static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc->stoltb_lock, flags);
+ vc->preempt_tb = mftb();
+ spin_unlock_irqrestore(&vc->stoltb_lock, flags);
+}
+
+static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc->stoltb_lock, flags);
+ if (vc->preempt_tb != TB_NIL) {
+ vc->stolen_tb += mftb() - vc->preempt_tb;
+ vc->preempt_tb = TB_NIL;
+ }
+ spin_unlock_irqrestore(&vc->stoltb_lock, flags);
+}
+
static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
* vcpu, and once it is set to this vcpu, only this task
* ever sets it to NULL.
*/
- if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) {
- spin_lock_irqsave(&vc->stoltb_lock, flags);
- if (vc->preempt_tb != TB_NIL) {
- vc->stolen_tb += mftb() - vc->preempt_tb;
- vc->preempt_tb = TB_NIL;
- }
- spin_unlock_irqrestore(&vc->stoltb_lock, flags);
- }
+ if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
+ kvmppc_core_end_stolen(vc);
+
spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
vcpu->arch.busy_preempt != TB_NIL) {
struct kvmppc_vcore *vc = vcpu->arch.vcore;
unsigned long flags;
- if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) {
- spin_lock_irqsave(&vc->stoltb_lock, flags);
- vc->preempt_tb = mftb();
- spin_unlock_irqrestore(&vc->stoltb_lock, flags);
- }
+ if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
+ kvmppc_core_start_stolen(vc);
+
spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
vcpu->arch.busy_preempt = mftb();
kvmppc_end_cede(vcpu);
}
-void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
+static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
{
vcpu->arch.pvr = pvr;
}
-int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
+static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
{
unsigned long pcr = 0;
struct kvmppc_vcore *vc = vcpu->arch.vcore;
return 0;
}
-void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
+static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
int r;
vcpu->arch.last_inst);
}
-struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
+static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
int r;
struct kvm_vcpu *v, *ret = NULL;
spin_lock(&vcore->lock);
if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
- vcore->vcore_state != VCORE_INACTIVE)
+ vcore->vcore_state != VCORE_INACTIVE &&
+ vcore->runner)
target = vcore->runner;
spin_unlock(&vcore->lock);
vcore->lpcr = kvm->arch.lpcr;
vcore->first_vcpuid = core * threads_per_subcore;
vcore->kvm = kvm;
+ INIT_LIST_HEAD(&vcore->preempt_list);
vcore->mpp_buffer_is_valid = false;
spin_unlock(&vcore->lock);
vcpu->arch.vcore = vcore;
vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
+ vcpu->arch.thread_cpu = -1;
vcpu->arch.cpu_type = KVM_CPU_3S_64;
kvmppc_sanity_check(vcpu);
/* Ensure the thread won't go into the kernel if it wakes */
tpaca->kvm_hstate.kvm_vcpu = NULL;
+ tpaca->kvm_hstate.kvm_vcore = NULL;
tpaca->kvm_hstate.napping = 0;
smp_wmb();
tpaca->kvm_hstate.hwthread_req = 1;
tpaca = &paca[cpu];
tpaca->kvm_hstate.hwthread_req = 0;
tpaca->kvm_hstate.kvm_vcpu = NULL;
+ tpaca->kvm_hstate.kvm_vcore = NULL;
+ tpaca->kvm_hstate.kvm_split_mode = NULL;
}
-static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
+static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
{
int cpu;
struct paca_struct *tpaca;
- struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ struct kvmppc_vcore *mvc = vc->master_vcore;
- if (vcpu->arch.timer_running) {
- hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
- vcpu->arch.timer_running = 0;
+ cpu = vc->pcpu;
+ if (vcpu) {
+ if (vcpu->arch.timer_running) {
+ hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
+ vcpu->arch.timer_running = 0;
+ }
+ cpu += vcpu->arch.ptid;
+ vcpu->cpu = mvc->pcpu;
+ vcpu->arch.thread_cpu = cpu;
}
- cpu = vc->pcpu + vcpu->arch.ptid;
tpaca = &paca[cpu];
- tpaca->kvm_hstate.kvm_vcore = vc;
- tpaca->kvm_hstate.ptid = vcpu->arch.ptid;
- vcpu->cpu = vc->pcpu;
- /* Order stores to hstate.kvm_vcore etc. before store to kvm_vcpu */
- smp_wmb();
tpaca->kvm_hstate.kvm_vcpu = vcpu;
+ tpaca->kvm_hstate.ptid = cpu - mvc->pcpu;
+ /* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */
+ smp_wmb();
+ tpaca->kvm_hstate.kvm_vcore = mvc;
if (cpu != smp_processor_id())
kvmppc_ipi_thread(cpu);
}
for (loops = 0; loops < 1000000; ++loops) {
/*
* Check if all threads are finished.
- * We set the vcpu pointer when starting a thread
+ * We set the vcore pointer when starting a thread
* and the thread clears it when finished, so we look
- * for any threads that still have a non-NULL vcpu ptr.
+ * for any threads that still have a non-NULL vcore ptr.
*/
for (i = 1; i < threads_per_subcore; ++i)
- if (paca[cpu + i].kvm_hstate.kvm_vcpu)
+ if (paca[cpu + i].kvm_hstate.kvm_vcore)
break;
if (i == threads_per_subcore) {
HMT_medium();
}
HMT_medium();
for (i = 1; i < threads_per_subcore; ++i)
- if (paca[cpu + i].kvm_hstate.kvm_vcpu)
+ if (paca[cpu + i].kvm_hstate.kvm_vcore)
pr_err("KVM: CPU %d seems to be stuck\n", cpu + i);
}
mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE);
}
+/*
+ * A list of virtual cores for each physical CPU.
+ * These are vcores that could run but their runner VCPU tasks are
+ * (or may be) preempted.
+ */
+struct preempted_vcore_list {
+ struct list_head list;
+ spinlock_t lock;
+};
+
+static DEFINE_PER_CPU(struct preempted_vcore_list, preempted_vcores);
+
+static void init_vcore_lists(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct preempted_vcore_list *lp = &per_cpu(preempted_vcores, cpu);
+ spin_lock_init(&lp->lock);
+ INIT_LIST_HEAD(&lp->list);
+ }
+}
+
+static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc)
+{
+ struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
+
+ vc->vcore_state = VCORE_PREEMPT;
+ vc->pcpu = smp_processor_id();
+ if (vc->num_threads < threads_per_subcore) {
+ spin_lock(&lp->lock);
+ list_add_tail(&vc->preempt_list, &lp->list);
+ spin_unlock(&lp->lock);
+ }
+
+ /* Start accumulating stolen time */
+ kvmppc_core_start_stolen(vc);
+}
+
+static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc)
+{
+ struct preempted_vcore_list *lp;
+
+ kvmppc_core_end_stolen(vc);
+ if (!list_empty(&vc->preempt_list)) {
+ lp = &per_cpu(preempted_vcores, vc->pcpu);
+ spin_lock(&lp->lock);
+ list_del_init(&vc->preempt_list);
+ spin_unlock(&lp->lock);
+ }
+ vc->vcore_state = VCORE_INACTIVE;
+}
+
+/*
+ * This stores information about the virtual cores currently
+ * assigned to a physical core.
+ */
+struct core_info {
+ int n_subcores;
+ int max_subcore_threads;
+ int total_threads;
+ int subcore_threads[MAX_SUBCORES];
+ struct kvm *subcore_vm[MAX_SUBCORES];
+ struct list_head vcs[MAX_SUBCORES];
+};
+
+/*
+ * This mapping means subcores 0 and 1 can use threads 0-3 and 4-7
+ * respectively in 2-way micro-threading (split-core) mode.
+ */
+static int subcore_thread_map[MAX_SUBCORES] = { 0, 4, 2, 6 };
+
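Spelled out with an illustrative helper (not in the patch): subcore 0 starts at thread 0 and subcore 1 at thread 4, so a 2-way split uses threads 0-3 and 4-7; a 4-way split adds subcores 2 and 3 at threads 2 and 6, giving thread pairs 0-1, 4-5, 2-3, 6-7.

	static int first_thread_of_subcore(int sub)
	{
		/* same contents as subcore_thread_map above */
		static const int map[4] = { 0, 4, 2, 6 };

		return map[sub];
	}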
+static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc)
+{
+ int sub;
+
+ memset(cip, 0, sizeof(*cip));
+ cip->n_subcores = 1;
+ cip->max_subcore_threads = vc->num_threads;
+ cip->total_threads = vc->num_threads;
+ cip->subcore_threads[0] = vc->num_threads;
+ cip->subcore_vm[0] = vc->kvm;
+ for (sub = 0; sub < MAX_SUBCORES; ++sub)
+ INIT_LIST_HEAD(&cip->vcs[sub]);
+ list_add_tail(&vc->preempt_list, &cip->vcs[0]);
+}
+
+static bool subcore_config_ok(int n_subcores, int n_threads)
+{
+ /* Can only dynamically split if unsplit to begin with */
+ if (n_subcores > 1 && threads_per_subcore < MAX_SMT_THREADS)
+ return false;
+ if (n_subcores > MAX_SUBCORES)
+ return false;
+ if (n_subcores > 1) {
+ if (!(dynamic_mt_modes & 2))
+ n_subcores = 4;
+ if (n_subcores > 2 && !(dynamic_mt_modes & 4))
+ return false;
+ }
+
+ return n_subcores * roundup_pow_of_two(n_threads) <= MAX_SMT_THREADS;
+}
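A standalone worked check of the capacity rule, assuming MAX_SMT_THREADS == 8 as defined earlier in the series (roundup_pow_of_two_demo is a stand-in for the kernel's roundup_pow_of_two):

	#include <assert.h>

	static unsigned long roundup_pow_of_two_demo(unsigned long n)
	{
		unsigned long r = 1;

		while (r < n)
			r <<= 1;
		return r;
	}

	int main(void)
	{
		/* two subcores of up to four threads: 2 * 4 = 8 slots, fits */
		assert(2 * roundup_pow_of_two_demo(4) <= 8);
		/* three subcores, largest with three threads: 3 * 4 = 12, rejected */
		assert(3 * roundup_pow_of_two_demo(3) > 8);
		return 0;
	}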
+
+static void init_master_vcore(struct kvmppc_vcore *vc)
+{
+ vc->master_vcore = vc;
+ vc->entry_exit_map = 0;
+ vc->in_guest = 0;
+ vc->napping_threads = 0;
+ vc->conferring_threads = 0;
+}
+
+/*
+ * See if the existing subcores can be split into 3 (or fewer) subcores
+ * of at most two threads each, so we can fit in another vcore. This
+ * assumes there are at most two subcores and at most 6 threads in total.
+ */
+static bool can_split_piggybacked_subcores(struct core_info *cip)
+{
+ int sub, new_sub;
+ int large_sub = -1;
+ int thr;
+ int n_subcores = cip->n_subcores;
+ struct kvmppc_vcore *vc, *vcnext;
+ struct kvmppc_vcore *master_vc = NULL;
+
+ for (sub = 0; sub < cip->n_subcores; ++sub) {
+ if (cip->subcore_threads[sub] <= 2)
+ continue;
+ if (large_sub >= 0)
+ return false;
+ large_sub = sub;
+ vc = list_first_entry(&cip->vcs[sub], struct kvmppc_vcore,
+ preempt_list);
+ if (vc->num_threads > 2)
+ return false;
+ n_subcores += (cip->subcore_threads[sub] - 1) >> 1;
+ }
+ if (n_subcores > 3 || large_sub < 0)
+ return false;
+
+ /*
+ * Seems feasible, so go through and move vcores to new subcores.
+ * Note that when we have two or more vcores in one subcore,
+ * all those vcores must have only one thread each.
+ */
+ new_sub = cip->n_subcores;
+ thr = 0;
+ sub = large_sub;
+ list_for_each_entry_safe(vc, vcnext, &cip->vcs[sub], preempt_list) {
+ if (thr >= 2) {
+ list_del(&vc->preempt_list);
+ list_add_tail(&vc->preempt_list, &cip->vcs[new_sub]);
+ /* vc->num_threads must be 1 */
+ if (++cip->subcore_threads[new_sub] == 1) {
+ cip->subcore_vm[new_sub] = vc->kvm;
+ init_master_vcore(vc);
+ master_vc = vc;
+ ++cip->n_subcores;
+ } else {
+ vc->master_vcore = master_vc;
+ ++new_sub;
+ }
+ }
+ thr += vc->num_threads;
+ }
+ cip->subcore_threads[large_sub] = 2;
+ cip->max_subcore_threads = 2;
+
+ return true;
+}
+
+static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
+{
+ int n_threads = vc->num_threads;
+ int sub;
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+ return false;
+
+ if (n_threads < cip->max_subcore_threads)
+ n_threads = cip->max_subcore_threads;
+ if (subcore_config_ok(cip->n_subcores + 1, n_threads)) {
+ cip->max_subcore_threads = n_threads;
+ } else if (cip->n_subcores <= 2 && cip->total_threads <= 6 &&
+ vc->num_threads <= 2) {
+ /*
+ * We may be able to fit another subcore in by
+ * splitting an existing subcore with 3 or 4
+ * threads into two 2-thread subcores, or one
+ * with 5 or 6 threads into three subcores.
+ * We can only do this if those subcores have
+ * piggybacked virtual cores.
+ */
+ if (!can_split_piggybacked_subcores(cip))
+ return false;
+ } else {
+ return false;
+ }
+
+ sub = cip->n_subcores;
+ ++cip->n_subcores;
+ cip->total_threads += vc->num_threads;
+ cip->subcore_threads[sub] = vc->num_threads;
+ cip->subcore_vm[sub] = vc->kvm;
+ init_master_vcore(vc);
+ list_del(&vc->preempt_list);
+ list_add_tail(&vc->preempt_list, &cip->vcs[sub]);
+
+ return true;
+}
+
+static bool can_piggyback_subcore(struct kvmppc_vcore *pvc,
+ struct core_info *cip, int sub)
+{
+ struct kvmppc_vcore *vc;
+ int n_thr;
+
+ vc = list_first_entry(&cip->vcs[sub], struct kvmppc_vcore,
+ preempt_list);
+
+ /* require same VM and same per-core reg values */
+ if (pvc->kvm != vc->kvm ||
+ pvc->tb_offset != vc->tb_offset ||
+ pvc->pcr != vc->pcr ||
+ pvc->lpcr != vc->lpcr)
+ return false;
+
+ /* P8 guest with > 1 thread per core would see wrong TIR value */
+ if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
+ (vc->num_threads > 1 || pvc->num_threads > 1))
+ return false;
+
+ n_thr = cip->subcore_threads[sub] + pvc->num_threads;
+ if (n_thr > cip->max_subcore_threads) {
+ if (!subcore_config_ok(cip->n_subcores, n_thr))
+ return false;
+ cip->max_subcore_threads = n_thr;
+ }
+
+ cip->total_threads += pvc->num_threads;
+ cip->subcore_threads[sub] = n_thr;
+ pvc->master_vcore = vc;
+ list_del(&pvc->preempt_list);
+ list_add_tail(&pvc->preempt_list, &cip->vcs[sub]);
+
+ return true;
+}
+
+/*
+ * Work out whether it is possible to piggyback the execution of
+ * vcore *pvc onto the execution of the other vcores described in *cip.
+ */
+static bool can_piggyback(struct kvmppc_vcore *pvc, struct core_info *cip,
+ int target_threads)
+{
+ int sub;
+
+ if (cip->total_threads + pvc->num_threads > target_threads)
+ return false;
+ for (sub = 0; sub < cip->n_subcores; ++sub)
+ if (cip->subcore_threads[sub] &&
+ can_piggyback_subcore(pvc, cip, sub))
+ return true;
+
+ if (can_dynamic_split(pvc, cip))
+ return true;
+
+ return false;
+}
+
static void prepare_threads(struct kvmppc_vcore *vc)
{
struct kvm_vcpu *vcpu, *vnext;
}
}
-static void post_guest_process(struct kvmppc_vcore *vc)
+static void collect_piggybacks(struct core_info *cip, int target_threads)
+{
+ struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
+ struct kvmppc_vcore *pvc, *vcnext;
+
+ spin_lock(&lp->lock);
+ list_for_each_entry_safe(pvc, vcnext, &lp->list, preempt_list) {
+ if (!spin_trylock(&pvc->lock))
+ continue;
+ prepare_threads(pvc);
+ if (!pvc->n_runnable) {
+ list_del_init(&pvc->preempt_list);
+ if (pvc->runner == NULL) {
+ pvc->vcore_state = VCORE_INACTIVE;
+ kvmppc_core_end_stolen(pvc);
+ }
+ spin_unlock(&pvc->lock);
+ continue;
+ }
+ if (!can_piggyback(pvc, cip, target_threads)) {
+ spin_unlock(&pvc->lock);
+ continue;
+ }
+ kvmppc_core_end_stolen(pvc);
+ pvc->vcore_state = VCORE_PIGGYBACK;
+ if (cip->total_threads >= target_threads)
+ break;
+ }
+ spin_unlock(&lp->lock);
+}
+
+static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
{
+ int still_running = 0;
u64 now;
long ret;
struct kvm_vcpu *vcpu, *vnext;
+ spin_lock(&vc->lock);
now = get_tb();
list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
arch.run_list) {
vcpu->arch.ret = ret;
vcpu->arch.trap = 0;
- if (vcpu->arch.ceded) {
- if (!is_kvmppc_resume_guest(ret))
- kvmppc_end_cede(vcpu);
- else
+ if (is_kvmppc_resume_guest(vcpu->arch.ret)) {
+ if (vcpu->arch.pending_exceptions)
+ kvmppc_core_prepare_to_enter(vcpu);
+ if (vcpu->arch.ceded)
kvmppc_set_timer(vcpu);
- }
- if (!is_kvmppc_resume_guest(vcpu->arch.ret)) {
+ else
+ ++still_running;
+ } else {
kvmppc_remove_runnable(vc, vcpu);
wake_up(&vcpu->arch.cpu_run);
}
}
+ list_del_init(&vc->preempt_list);
+ if (!is_master) {
+ if (still_running > 0) {
+ kvmppc_vcore_preempt(vc);
+ } else if (vc->runner) {
+ vc->vcore_state = VCORE_PREEMPT;
+ kvmppc_core_start_stolen(vc);
+ } else {
+ vc->vcore_state = VCORE_INACTIVE;
+ }
+ if (vc->n_runnable > 0 && vc->runner == NULL) {
+ /* make sure there's a candidate runner awake */
+ vcpu = list_first_entry(&vc->runnable_threads,
+ struct kvm_vcpu, arch.run_list);
+ wake_up(&vcpu->arch.cpu_run);
+ }
+ }
+ spin_unlock(&vc->lock);
}
/*
struct kvm_vcpu *vcpu, *vnext;
int i;
int srcu_idx;
+ struct core_info core_info;
+ struct kvmppc_vcore *pvc, *vcnext;
+ struct kvm_split_mode split_info, *sip;
+ int split, subcore_size, active;
+ int sub;
+ bool thr0_done;
+ unsigned long cmd_bit, stat_bit;
+ int pcpu, thr;
+ int target_threads;
/*
* Remove from the list any threads that have a signal pending
/*
* Initialize *vc.
*/
- vc->entry_exit_map = 0;
+ init_master_vcore(vc);
vc->preempt_tb = TB_NIL;
- vc->in_guest = 0;
- vc->napping_threads = 0;
- vc->conferring_threads = 0;
/*
* Make sure we are running on primary threads, and that secondary
goto out;
}
+ /*
+ * See if we could run any other vcores on the physical core
+ * along with this one.
+ */
+ init_core_info(&core_info, vc);
+ pcpu = smp_processor_id();
+ target_threads = threads_per_subcore;
+ if (target_smt_mode && target_smt_mode < target_threads)
+ target_threads = target_smt_mode;
+ if (vc->num_threads < target_threads)
+ collect_piggybacks(&core_info, target_threads);
+
+ /* Decide on micro-threading (split-core) mode */
+ subcore_size = threads_per_subcore;
+ cmd_bit = stat_bit = 0;
+ split = core_info.n_subcores;
+ sip = NULL;
+ if (split > 1) {
+ /* threads_per_subcore must be MAX_SMT_THREADS (8) here */
+ if (split == 2 && (dynamic_mt_modes & 2)) {
+ cmd_bit = HID0_POWER8_1TO2LPAR;
+ stat_bit = HID0_POWER8_2LPARMODE;
+ } else {
+ split = 4;
+ cmd_bit = HID0_POWER8_1TO4LPAR;
+ stat_bit = HID0_POWER8_4LPARMODE;
+ }
+ subcore_size = MAX_SMT_THREADS / split;
+ sip = &split_info;
+ memset(&split_info, 0, sizeof(split_info));
+ split_info.rpr = mfspr(SPRN_RPR);
+ split_info.pmmar = mfspr(SPRN_PMMAR);
+ split_info.ldbar = mfspr(SPRN_LDBAR);
+ split_info.subcore_size = subcore_size;
+ for (sub = 0; sub < core_info.n_subcores; ++sub)
+ split_info.master_vcs[sub] =
+ list_first_entry(&core_info.vcs[sub],
+ struct kvmppc_vcore, preempt_list);
+ /* order writes to split_info before kvm_split_mode pointer */
+ smp_wmb();
+ }
+ pcpu = smp_processor_id();
+ for (thr = 0; thr < threads_per_subcore; ++thr)
+ paca[pcpu + thr].kvm_hstate.kvm_split_mode = sip;
+
+ /* Initiate micro-threading (split-core) if required */
+ if (cmd_bit) {
+ unsigned long hid0 = mfspr(SPRN_HID0);
+
+ hid0 |= cmd_bit | HID0_POWER8_DYNLPARDIS;
+ mb();
+ mtspr(SPRN_HID0, hid0);
+ isync();
+ for (;;) {
+ hid0 = mfspr(SPRN_HID0);
+ if (hid0 & stat_bit)
+ break;
+ cpu_relax();
+ }
+ }
- vc->pcpu = smp_processor_id();
- list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
- kvmppc_start_thread(vcpu);
- kvmppc_create_dtl_entry(vcpu, vc);
- trace_kvm_guest_enter(vcpu);
+ /* Start all the threads */
+ active = 0;
+ for (sub = 0; sub < core_info.n_subcores; ++sub) {
+ thr = subcore_thread_map[sub];
+ thr0_done = false;
+ active |= 1 << thr;
+ list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list) {
+ pvc->pcpu = pcpu + thr;
+ list_for_each_entry(vcpu, &pvc->runnable_threads,
+ arch.run_list) {
+ kvmppc_start_thread(vcpu, pvc);
+ kvmppc_create_dtl_entry(vcpu, pvc);
+ trace_kvm_guest_enter(vcpu);
+ if (!vcpu->arch.ptid)
+ thr0_done = true;
+ active |= 1 << (thr + vcpu->arch.ptid);
+ }
+ /*
+ * We need to start the first thread of each subcore
+ * even if it doesn't have a vcpu.
+ */
+ if (pvc->master_vcore == pvc && !thr0_done)
+ kvmppc_start_thread(NULL, pvc);
+ thr += pvc->num_threads;
+ }
}
- /* Set this explicitly in case thread 0 doesn't have a vcpu */
- get_paca()->kvm_hstate.kvm_vcore = vc;
- get_paca()->kvm_hstate.ptid = 0;
+ /*
+ * Ensure that split_info.do_nap is set after setting
+ * the vcore pointer in the PACA of the secondaries.
+ */
+ smp_mb();
+ if (cmd_bit)
+ split_info.do_nap = 1; /* ask secondaries to nap when done */
+
+ /*
+ * When doing micro-threading, poke the inactive threads as well.
+ * This gets them to the nap instruction after kvm_do_nap,
+ * which reduces the time taken to unsplit later.
+ */
+ if (split > 1)
+ for (thr = 1; thr < threads_per_subcore; ++thr)
+ if (!(active & (1 << thr)))
+ kvmppc_ipi_thread(pcpu + thr);
vc->vcore_state = VCORE_RUNNING;
preempt_disable();
trace_kvmppc_run_core(vc, 0);
- spin_unlock(&vc->lock);
+ for (sub = 0; sub < core_info.n_subcores; ++sub)
+ list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list)
+ spin_unlock(&pvc->lock);
kvm_guest_enter();
__kvmppc_vcore_entry();
- spin_lock(&vc->lock);
-
if (vc->mpp_buffer)
kvmppc_start_saving_l2_cache(vc);
- /* disable sending of IPIs on virtual external irqs */
- list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
- vcpu->cpu = -1;
- /* wait for secondary threads to finish writing their state to memory */
- kvmppc_wait_for_nap();
- for (i = 0; i < threads_per_subcore; ++i)
- kvmppc_release_hwthread(vc->pcpu + i);
+ srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
+
+ spin_lock(&vc->lock);
/* prevent other vcpu threads from doing kvmppc_start_thread() now */
vc->vcore_state = VCORE_EXITING;
- spin_unlock(&vc->lock);
- srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
+ /* wait for secondary threads to finish writing their state to memory */
+ kvmppc_wait_for_nap();
+
+ /* Return to whole-core mode if we split the core earlier */
+ if (split > 1) {
+ unsigned long hid0 = mfspr(SPRN_HID0);
+ unsigned long loops = 0;
+
+ hid0 &= ~HID0_POWER8_DYNLPARDIS;
+ stat_bit = HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE;
+ mb();
+ mtspr(SPRN_HID0, hid0);
+ isync();
+ for (;;) {
+ hid0 = mfspr(SPRN_HID0);
+ if (!(hid0 & stat_bit))
+ break;
+ cpu_relax();
+ ++loops;
+ }
+ split_info.do_nap = 0;
+ }
+
+ /* Let secondaries go back to the offline loop */
+ for (i = 0; i < threads_per_subcore; ++i) {
+ kvmppc_release_hwthread(pcpu + i);
+ if (sip && sip->napped[i])
+ kvmppc_ipi_thread(pcpu + i);
+ }
+
+ spin_unlock(&vc->lock);
/* make sure updates to secondary vcpu structs are visible now */
smp_mb();
kvm_guest_exit();
- preempt_enable();
+ for (sub = 0; sub < core_info.n_subcores; ++sub)
+ list_for_each_entry_safe(pvc, vcnext, &core_info.vcs[sub],
+ preempt_list)
+ post_guest_process(pvc, pvc == vc);
spin_lock(&vc->lock);
- post_guest_process(vc);
+ preempt_enable();
out:
vc->vcore_state = VCORE_INACTIVE;
* Wait for some other vcpu thread to execute us, and
* wake us up when we need to handle something in the host.
*/
-static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state)
+static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc,
+ struct kvm_vcpu *vcpu, int wait_state)
{
DEFINE_WAIT(wait);
prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
- if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
+ if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
+ spin_unlock(&vc->lock);
schedule();
+ spin_lock(&vc->lock);
+ }
finish_wait(&vcpu->arch.cpu_run, &wait);
}
* this thread straight away and have it join in.
*/
if (!signal_pending(current)) {
- if (vc->vcore_state == VCORE_RUNNING && !VCORE_IS_EXITING(vc)) {
+ if (vc->vcore_state == VCORE_PIGGYBACK) {
+ struct kvmppc_vcore *mvc = vc->master_vcore;
+ if (spin_trylock(&mvc->lock)) {
+ if (mvc->vcore_state == VCORE_RUNNING &&
+ !VCORE_IS_EXITING(mvc)) {
+ kvmppc_create_dtl_entry(vcpu, vc);
+ kvmppc_start_thread(vcpu, vc);
+ trace_kvm_guest_enter(vcpu);
+ }
+ spin_unlock(&mvc->lock);
+ }
+ } else if (vc->vcore_state == VCORE_RUNNING &&
+ !VCORE_IS_EXITING(vc)) {
kvmppc_create_dtl_entry(vcpu, vc);
- kvmppc_start_thread(vcpu);
+ kvmppc_start_thread(vcpu, vc);
trace_kvm_guest_enter(vcpu);
} else if (vc->vcore_state == VCORE_SLEEPING) {
wake_up(&vc->wq);
while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
!signal_pending(current)) {
+ if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
+ kvmppc_vcore_end_preempt(vc);
+
if (vc->vcore_state != VCORE_INACTIVE) {
- spin_unlock(&vc->lock);
- kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE);
- spin_lock(&vc->lock);
+ kvmppc_wait_for_exec(vc, vcpu, TASK_INTERRUPTIBLE);
continue;
}
list_for_each_entry_safe(v, vn, &vc->runnable_threads,
if (n_ceded == vc->n_runnable) {
kvmppc_vcore_blocked(vc);
} else if (need_resched()) {
- vc->vcore_state = VCORE_PREEMPT;
+ kvmppc_vcore_preempt(vc);
/* Let something else run */
cond_resched_lock(&vc->lock);
- vc->vcore_state = VCORE_INACTIVE;
+ if (vc->vcore_state == VCORE_PREEMPT)
+ kvmppc_vcore_end_preempt(vc);
} else {
kvmppc_run_core(vc);
}
while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
(vc->vcore_state == VCORE_RUNNING ||
- vc->vcore_state == VCORE_EXITING)) {
- spin_unlock(&vc->lock);
- kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE);
- spin_lock(&vc->lock);
- }
+ vc->vcore_state == VCORE_EXITING))
+ kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE);
if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
kvmppc_remove_runnable(vc, vcpu);
init_default_hcalls();
+ init_vcore_lists();
+
r = kvmppc_mmu_hv_init();
return r;
}
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
unsigned int yield_count)
{
- struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
+ int ptid = local_paca->kvm_hstate.ptid;
int threads_running;
int threads_ceded;
int threads_conferring;
u64 stop = get_tb() + 10 * tb_ticks_per_usec;
int rv = H_SUCCESS; /* => don't yield */
- set_bit(vcpu->arch.ptid, &vc->conferring_threads);
+ set_bit(ptid, &vc->conferring_threads);
while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
threads_running = VCORE_ENTRY_MAP(vc);
threads_ceded = vc->napping_threads;
break;
}
}
- clear_bit(vcpu->arch.ptid, &vc->conferring_threads);
+ clear_bit(ptid, &vc->conferring_threads);
return rv;
}
{
struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
int ptid = local_paca->kvm_hstate.ptid;
- int me, ee;
+ struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
+ int me, ee, i;
/* Set our bit in the threads-exiting-guest map in the 0xff00
bits of vcore->entry_exit_map */
*/
if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));
+
+ /*
+ * If we are doing dynamic micro-threading, interrupt the other
+ * subcores to pull them out of their guests too.
+ */
+ if (!sip)
+ return;
+
+ for (i = 0; i < MAX_SUBCORES; ++i) {
+ vc = sip->master_vcs[i];
+ if (!vc)
+ break;
+ do {
+ ee = vc->entry_exit_map;
+ /* Already asked to exit? */
+ if ((ee >> 8) != 0)
+ break;
+ } while (cmpxchg(&vc->entry_exit_map, ee,
+ ee | VCORE_EXIT_REQ) != ee);
+ if ((ee >> 8) == 0)
+ kvmhv_interrupt_vcore(vc, ee);
+ }
}
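The cmpxchg loop just above is the usual lock-free read-modify-write idiom: snapshot the map, give up if some other thread has already requested an exit, otherwise try to publish the request and retry if the map changed underneath. A minimal stand-alone sketch of the same idiom, using C11 atomics instead of the kernel's cmpxchg() (all names here are illustrative, not from this patch):

	#include <stdatomic.h>

	#define EXIT_REQ 0x100	/* illustrative: a bit in the 0xff00 "exiting" byte */

	/* Returns 1 if this caller published the exit request, 0 if one
	 * was already pending when we looked. */
	static int request_exit(_Atomic unsigned int *map)
	{
		unsigned int old = atomic_load(map);

		do {
			if (old >> 8)	/* someone already asked to exit */
				return 0;
		} while (!atomic_compare_exchange_weak(map, &old, old | EXIT_REQ));

		return 1;
	}

As in the kernel loop, only the thread whose compare-and-swap succeeds goes on to interrupt the vcore; everyone else sees a non-zero exit byte and backs off.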
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>
+#include <linux/log2.h>
#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);
+/* Update the changed page order field of an rmap entry */
+void kvmppc_update_rmap_change(unsigned long *rmap, unsigned long psize)
+{
+ unsigned long order;
+
+ if (!psize)
+ return;
+ order = ilog2(psize);
+ order <<= KVMPPC_RMAP_CHG_SHIFT;
+ if (order > (*rmap & KVMPPC_RMAP_CHG_ORDER))
+ *rmap = (*rmap & ~KVMPPC_RMAP_CHG_ORDER) | order;
+}
+EXPORT_SYMBOL_GPL(kvmppc_update_rmap_change);
+
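Worked example of the max-order bookkeeping in kvmppc_update_rmap_change() above (the shift and mask are the kernel's; the numbers are plain arithmetic): a 64 KiB change gives ilog2(65536) = 16, which is stored in the CHG_ORDER field; a later 16 MiB change gives ilog2(16777216) = 24 and replaces it; a later 4 KiB change (order 12) is smaller and leaves the field alone. The rmap entry therefore always records the largest page size whose change bit has been folded into it.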
+/* Returns a pointer to the revmap entry for the page mapped by a HPTE */
+static unsigned long *revmap_for_hpte(struct kvm *kvm, unsigned long hpte_v,
+ unsigned long hpte_gr)
+{
+ struct kvm_memory_slot *memslot;
+ unsigned long *rmap;
+ unsigned long gfn;
+
+ gfn = hpte_rpn(hpte_gr, hpte_page_size(hpte_v, hpte_gr));
+ memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
+ if (!memslot)
+ return NULL;
+
+ rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
+ return rmap;
+}
+
/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
struct revmap_entry *rev,
unsigned long hpte_v, unsigned long hpte_r)
{
struct revmap_entry *next, *prev;
- unsigned long gfn, ptel, head;
- struct kvm_memory_slot *memslot;
+ unsigned long ptel, head;
unsigned long *rmap;
unsigned long rcbits;
rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
ptel = rev->guest_rpte |= rcbits;
- gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
- memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
- if (!memslot)
+ rmap = revmap_for_hpte(kvm, hpte_v, ptel);
+ if (!rmap)
return;
-
- rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
lock_rmap(rmap);
head = *rmap & KVMPPC_RMAP_INDEX;
*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
}
*rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
+ if (rcbits & HPTE_R_C)
+ kvmppc_update_rmap_change(rmap, hpte_page_size(hpte_v, hpte_r));
unlock_rmap(rmap);
}
rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
v = pte & ~HPTE_V_HVLOCK;
if (v & HPTE_V_VALID) {
- u64 pte1;
-
- pte1 = be64_to_cpu(hpte[1]);
hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
- rb = compute_tlbie_rb(v, pte1, pte_index);
+ rb = compute_tlbie_rb(v, be64_to_cpu(hpte[1]), pte_index);
do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
- /* Read PTE low word after tlbie to get final R/C values */
- remove_revmap_chain(kvm, pte_index, rev, v, pte1);
+ /*
+ * The reference (R) and change (C) bits in a HPT
+ * entry can be set by hardware at any time up until
+ * the HPTE is invalidated and the TLB invalidation
+ * sequence has completed. This means that when
+ * removing a HPTE, we need to re-read the HPTE after
+ * the invalidation sequence has completed in order to
+ * obtain reliable values of R and C.
+ */
+ remove_revmap_chain(kvm, pte_index, rev, v,
+ be64_to_cpu(hpte[1]));
}
r = rev->guest_rpte & ~HPTE_GR_RESERVED;
note_hpte_modification(kvm, rev);
return H_SUCCESS;
}
+long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index)
+{
+ struct kvm *kvm = vcpu->kvm;
+ __be64 *hpte;
+ unsigned long v, r, gr;
+ struct revmap_entry *rev;
+ unsigned long *rmap;
+ long ret = H_NOT_FOUND;
+
+ if (pte_index >= kvm->arch.hpt_npte)
+ return H_PARAMETER;
+
+ rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+ hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+ while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
+ cpu_relax();
+ v = be64_to_cpu(hpte[0]);
+ r = be64_to_cpu(hpte[1]);
+ if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
+ goto out;
+
+ gr = rev->guest_rpte;
+ if (rev->guest_rpte & HPTE_R_R) {
+ rev->guest_rpte &= ~HPTE_R_R;
+ note_hpte_modification(kvm, rev);
+ }
+ if (v & HPTE_V_VALID) {
+ gr |= r & (HPTE_R_R | HPTE_R_C);
+ if (r & HPTE_R_R) {
+ kvmppc_clear_ref_hpte(kvm, hpte, pte_index);
+ rmap = revmap_for_hpte(kvm, v, gr);
+ if (rmap) {
+ lock_rmap(rmap);
+ *rmap |= KVMPPC_RMAP_REFERENCED;
+ unlock_rmap(rmap);
+ }
+ }
+ }
+ vcpu->arch.gpr[4] = gr;
+ ret = H_SUCCESS;
+ out:
+ unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
+ return ret;
+}
+
+long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index)
+{
+ struct kvm *kvm = vcpu->kvm;
+ __be64 *hpte;
+ unsigned long v, r, gr;
+ struct revmap_entry *rev;
+ unsigned long *rmap;
+ long ret = H_NOT_FOUND;
+
+ if (pte_index >= kvm->arch.hpt_npte)
+ return H_PARAMETER;
+
+ rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+ hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+ while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
+ cpu_relax();
+ v = be64_to_cpu(hpte[0]);
+ r = be64_to_cpu(hpte[1]);
+ if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
+ goto out;
+
+ gr = rev->guest_rpte;
+ if (gr & HPTE_R_C) {
+ rev->guest_rpte &= ~HPTE_R_C;
+ note_hpte_modification(kvm, rev);
+ }
+ if (v & HPTE_V_VALID) {
+ /* need to make it temporarily absent so C is stable */
+ hpte[0] |= cpu_to_be64(HPTE_V_ABSENT);
+ kvmppc_invalidate_hpte(kvm, hpte, pte_index);
+ r = be64_to_cpu(hpte[1]);
+ gr |= r & (HPTE_R_R | HPTE_R_C);
+ if (r & HPTE_R_C) {
+ unsigned long psize = hpte_page_size(v, r);
+ hpte[1] = cpu_to_be64(r & ~HPTE_R_C);
+ eieio();
+ rmap = revmap_for_hpte(kvm, v, gr);
+ if (rmap) {
+ lock_rmap(rmap);
+ *rmap |= KVMPPC_RMAP_CHANGED;
+ kvmppc_update_rmap_change(rmap, psize);
+ unlock_rmap(rmap);
+ }
+ }
+ }
+ vcpu->arch.gpr[4] = gr;
+ ret = H_SUCCESS;
+ out:
+ unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
+ return ret;
+}
+
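For context, a hedged sketch of how a pseries guest could invoke these two handlers. plpar_hcall(), PLPAR_HCALL_BUFSIZE, H_CLEAR_REF, H_SUCCESS and HPTE_R_R are the standard powerpc names, but exact header locations vary by kernel version, and the helper below is illustrative rather than part of this patch:

	#include <asm/hvcall.h>

	/* Read-and-clear the reference bit of HPT slot pte_index; the old
	 * guest RPTE comes back in retbuf[0] (GPR4 in the handler above). */
	static int hpte_test_and_clear_ref(unsigned long pte_index, int *was_ref)
	{
		unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
		long rc = plpar_hcall(H_CLEAR_REF, retbuf, 0 /* flags */, pte_index);

		if (rc != H_SUCCESS)
			return -1;
		*was_ref = !!(retbuf[0] & HPTE_R_R);
		return 0;
	}

H_CLEAR_MOD would be called the same way, testing HPTE_R_C in the returned RPTE instead.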
void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
unsigned long pte_index)
{
}
/* Check if the core is loaded, if not, too hard */
- cpu = vcpu->cpu;
+ cpu = vcpu->arch.thread_cpu;
if (cpu < 0 || cpu >= nr_cpu_ids) {
this_icp->rm_action |= XICS_RM_KICK_VCPU;
this_icp->rm_kick_target = vcpu;
return;
}
- /* In SMT cpu will always point to thread 0, we adjust it */
- cpu += vcpu->arch.ptid;
smp_mb();
kvmhv_rm_send_ipi(cpu);
subf r4, r4, r3
mtspr SPRN_DEC, r4
+ /* hwthread_req may have been set by cede or by having no vcpu to run, so clear it */
+ li r0, 0
+ stb r0, HSTATE_HWTHREAD_REQ(r13)
+
/*
* For external and machine check interrupts, we need
* to call the Linux handler to process the interrupt.
ld r5, HSTATE_KVM_VCORE(r13)
li r0, 0
stb r0, HSTATE_NAPPING(r13)
- stb r0, HSTATE_HWTHREAD_REQ(r13)
/* check the wake reason */
bl kvmppc_check_wake_reason
cmpdi r3, 0
bge kvm_no_guest
- /* get vcpu pointer, NULL if we have no vcpu to run */
- ld r4,HSTATE_KVM_VCPU(r13)
- cmpdi r4,0
- /* if we have no vcpu to run, go back to sleep */
+ /* get vcore pointer, NULL if we have nothing to run */
+ ld r5,HSTATE_KVM_VCORE(r13)
+ cmpdi r5,0
+ /* if we have no vcore to run, go back to sleep */
beq kvm_no_guest
kvm_secondary_got_guest:
ld r6, PACA_DSCR_DEFAULT(r13)
std r6, HSTATE_DSCR(r13)
- /* Order load of vcore, ptid etc. after load of vcpu */
+ /* On thread 0 of a subcore, set HDEC to max */
+ lbz r4, HSTATE_PTID(r13)
+ cmpwi r4, 0
+ bne 63f
+ lis r6, 0x7fff
+ ori r6, r6, 0xffff
+ mtspr SPRN_HDEC, r6
+ /* and set per-LPAR registers, if doing dynamic micro-threading */
+ ld r6, HSTATE_SPLIT_MODE(r13)
+ cmpdi r6, 0
+ beq 63f
+ ld r0, KVM_SPLIT_RPR(r6)
+ mtspr SPRN_RPR, r0
+ ld r0, KVM_SPLIT_PMMAR(r6)
+ mtspr SPRN_PMMAR, r0
+ ld r0, KVM_SPLIT_LDBAR(r6)
+ mtspr SPRN_LDBAR, r0
+ isync
+63:
+ /* Order load of vcpu after load of vcore */
lwsync
+ ld r4, HSTATE_KVM_VCPU(r13)
bl kvmppc_hv_entry
/* Back from the guest, go back to nap */
- /* Clear our vcpu pointer so we don't come back in early */
+ /* Clear our vcpu and vcore pointers so we don't come back in early */
li r0, 0
+ std r0, HSTATE_KVM_VCPU(r13)
/*
- * Once we clear HSTATE_KVM_VCPU(r13), the code in
+ * Once we clear HSTATE_KVM_VCORE(r13), the code in
* kvmppc_run_core() is going to assume that all our vcpu
* state is visible in memory. This lwsync makes sure
* that that is true.
*/
lwsync
- std r0, HSTATE_KVM_VCPU(r13)
+ std r0, HSTATE_KVM_VCORE(r13)
/*
* At this point we have finished executing in the guest.
b power7_wakeup_loss
53: HMT_LOW
- ld r4, HSTATE_KVM_VCPU(r13)
- cmpdi r4, 0
+ ld r5, HSTATE_KVM_VCORE(r13)
+ cmpdi r5, 0
+ bne 60f
+ ld r3, HSTATE_SPLIT_MODE(r13)
+ cmpdi r3, 0
+ beq kvm_no_guest
+ lbz r0, KVM_SPLIT_DO_NAP(r3)
+ cmpwi r0, 0
beq kvm_no_guest
HMT_MEDIUM
+ b kvm_unsplit_nap
+60: HMT_MEDIUM
b kvm_secondary_got_guest
54: li r0, KVM_HWTHREAD_IN_KVM
stb r0, HSTATE_HWTHREAD_STATE(r13)
b kvm_no_guest
+/*
+ * Here the primary thread is trying to return the core to
+ * whole-core mode, so we need to nap.
+ */
+kvm_unsplit_nap:
+ /*
+ * Ensure that secondary doesn't nap when it has
+ * its vcore pointer set.
+ */
+ sync /* matches smp_mb() before setting split_info.do_nap */
+ ld r0, HSTATE_KVM_VCORE(r13)
+ cmpdi r0, 0
+ bne kvm_no_guest
+ /* clear any pending message */
+BEGIN_FTR_SECTION
+ lis r6, (PPC_DBELL_SERVER << (63-36))@h
+ PPC_MSGCLR(6)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ /* Set kvm_split_mode.napped[tid] = 1 */
+ ld r3, HSTATE_SPLIT_MODE(r13)
+ li r0, 1
+ lhz r4, PACAPACAINDEX(r13)
+ clrldi r4, r4, 61 /* micro-threading => P8 => 8 threads/core */
+ addi r4, r4, KVM_SPLIT_NAPPED
+ stbx r0, r3, r4
+ /* Check the do_nap flag again after setting napped[] */
+ sync
+ lbz r0, KVM_SPLIT_DO_NAP(r3)
+ cmpwi r0, 0
+ beq 57f
+ li r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
+ mfspr r4, SPRN_LPCR
+ rlwimi r4, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
+ mtspr SPRN_LPCR, r4
+ isync
+ std r0, HSTATE_SCRATCH0(r13)
+ ptesync
+ ld r0, HSTATE_SCRATCH0(r13)
+1: cmpd r0, r0
+ bne 1b
+ nap
+ b .
+
+57: li r0, 0
+ stbx r0, r3, r4
+ b kvm_no_guest
+
/******************************************************************************
* *
* Entry code *
cmpwi r0, 0
bne 21f
HMT_LOW
-20: lbz r0, VCORE_IN_GUEST(r5)
+20: lwz r3, VCORE_ENTRY_EXIT(r5)
+ cmpwi r3, 0x100
+ bge no_switch_exit
+ lbz r0, VCORE_IN_GUEST(r5)
cmpwi r0, 0
beq 20b
HMT_MEDIUM
blt hdec_soon
ld r6, VCPU_CTR(r4)
- lwz r7, VCPU_XER(r4)
+ ld r7, VCPU_XER(r4)
mtctr r6
mtxer r7
#endif
11: b kvmhv_switch_to_host
+no_switch_exit:
+ HMT_MEDIUM
+ li r12, 0
+ b 12f
hdec_soon:
li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
- stw r12, VCPU_TRAP(r4)
+12: stw r12, VCPU_TRAP(r4)
mr r9, r4
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
addi r3, r4, VCPU_TB_RMEXIT
mfctr r3
mfxer r4
std r3, VCPU_CTR(r9)
- stw r4, VCPU_XER(r9)
+ std r4, VCPU_XER(r9)
/* If this is a page table miss then see if it's theirs or ours */
cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
bne 3f
lbz r0, HSTATE_HOST_IPI(r13)
+ cmpwi r0, 0
beq 4f
b guest_exit_cont
3:
ld r9, HSTATE_KVM_VCPU(r13)
lwz r12, VCPU_TRAP(r9)
+ /* Stop others sending VCPU interrupts to this physical CPU */
+ li r0, -1
+ stw r0, VCPU_CPU(r9)
+ stw r0, VCPU_THREAD_CPU(r9)
+
/* Save guest CTRL register, set runlatch to 1 */
mfspr r6,SPRN_CTRLF
stw r6,VCPU_CTRL(r9)
/* Primary thread waits for all the secondaries to exit guest */
15: lwz r3,VCORE_ENTRY_EXIT(r5)
- srwi r0,r3,8
+ rlwinm r0,r3,32-8,0xff
clrldi r3,r3,56
cmpw r3,r0
bne 15b
isync
+ /* Did we actually switch to the guest at all? */
+ lbz r6, VCORE_IN_GUEST(r5)
+ cmpwi r6, 0
+ beq 19f
+
/* Primary thread switches back to host partition */
ld r6,KVM_HOST_SDR1(r4)
lwz r7,KVM_HOST_LPID(r4)
18:
/* Signal secondary CPUs to continue */
stb r0,VCORE_IN_GUEST(r5)
- lis r8,0x7fff /* MAX_INT@h */
+19: lis r8,0x7fff /* MAX_INT@h */
mtspr SPRN_HDEC,r8
16: ld r8,KVM_HOST_LPCR(r4)
bl kvmppc_msr_interrupt
fast_interrupt_c_return:
6: ld r7, VCPU_CTR(r9)
- lwz r8, VCPU_XER(r9)
+ ld r8, VCPU_XER(r9)
mtctr r7
mtxer r8
mr r4, r9
.long DOTSYM(kvmppc_h_remove) - hcall_real_table
.long DOTSYM(kvmppc_h_enter) - hcall_real_table
.long DOTSYM(kvmppc_h_read) - hcall_real_table
- .long 0 /* 0x10 - H_CLEAR_MOD */
- .long 0 /* 0x14 - H_CLEAR_REF */
+ .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
+ .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
.long DOTSYM(kvmppc_h_protect) - hcall_real_table
.long DOTSYM(kvmppc_h_get_tce) - hcall_real_table
.long DOTSYM(kvmppc_h_put_tce) - hcall_real_table
return kvmppc_get_field(inst, msb + 32, lsb + 32);
}
-bool kvmppc_inst_is_paired_single(struct kvm_vcpu *vcpu, u32 inst)
+static bool kvmppc_inst_is_paired_single(struct kvm_vcpu *vcpu, u32 inst)
{
if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
return false;
PPC_LL r8, SVCPU_CTR(r3)
PPC_LL r9, SVCPU_LR(r3)
lwz r10, SVCPU_CR(r3)
- lwz r11, SVCPU_XER(r3)
+ PPC_LL r11, SVCPU_XER(r3)
mtctr r8
mtlr r9
mfctr r8
mflr r9
- stw r5, SVCPU_XER(r13)
+ PPC_STL r5, SVCPU_XER(r13)
PPC_STL r6, SVCPU_FAULT_DAR(r13)
stw r7, SVCPU_FAULT_DSISR(r13)
PPC_STL r8, SVCPU_CTR(r13)
* =======
*
* Each ICS has a spin lock protecting the information about the IRQ
- * sources and avoiding simultaneous deliveries if the same interrupt.
+ * sources and avoiding simultaneous deliveries of the same interrupt.
*
* ICP operations are done via a single compare & swap transaction
* (most ICP state fits in the union kvmppc_icp_state)
#endif
break;
case BOOKE_INTERRUPT_CRITICAL:
+ kvmppc_fill_pt_regs(&regs);
unknown_exception(&regs);

break;
case BOOKE_INTERRUPT_DEBUG:
| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
vcpu->arch.shared->mas1 =
(vcpu->arch.shared->mas6 & MAS6_SPID0)
- | (vcpu->arch.shared->mas6 & (MAS6_SAS ? MAS1_TS : 0))
+ | ((vcpu->arch.shared->mas6 & MAS6_SAS) ? MAS1_TS : 0)
| (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0));
vcpu->arch.shared->mas2 &= MAS2_EPN;
vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 &
return kvmppc_core_pending_dec(vcpu);
}
-enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
+static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
struct kvm_vcpu *vcpu;
{
unsigned long mfn;
+ /*
+ * Some x86 code is still using pfn_to_mfn instead of
+ * pfn_to_gfn. This will have to be removed once we figure
+ * out which call each site needs.
+ */
if (xen_feature(XENFEAT_auto_translated_physmap))
return pfn;
{
unsigned long pfn;
+ /*
+ * Some x86 code is still using mfn_to_pfn instead of
+ * gfn_to_pfn. This will have to be removed once we figure
+ * out which call each site needs.
+ */
if (xen_feature(XENFEAT_auto_translated_physmap))
return mfn;
return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
}
+/* Pseudo-physical <-> Guest conversion */
+static inline unsigned long pfn_to_gfn(unsigned long pfn)
+{
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return pfn;
+ else
+ return pfn_to_mfn(pfn);
+}
+
+static inline unsigned long gfn_to_pfn(unsigned long gfn)
+{
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return gfn;
+ else
+ return mfn_to_pfn(gfn);
+}
+
+/* Pseudo-physical <-> Bus conversion */
+#define pfn_to_bfn(pfn) pfn_to_gfn(pfn)
+#define bfn_to_pfn(bfn) gfn_to_pfn(bfn)
+
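A hedged usage sketch for the new helpers. gnttab_grant_foreign_access() is the real grant-table API (this series later converts its callers to virt_to_gfn()); the surrounding variables are illustrative. The point is that callers pass a gfn and no longer care whether the guest is PV (gfn == mfn) or auto-translated (gfn == pfn):

	/* Grant a local page to remote_domid, read-write. */
	unsigned long pfn = page_to_pfn(page);
	int gref = gnttab_grant_foreign_access(remote_domid,
					       pfn_to_gfn(pfn), 0);
	if (gref < 0)
		return gref;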
/*
* We detect special mappings in one of two ways:
* 1. If the MFN is an I/O page then Xen will set the m2p entry
* require. In all the cases we care about, the FOREIGN_FRAME bit is
* masked (e.g., pfn_to_mfn()) so behaviour there is correct.
*/
-static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
+static inline unsigned long bfn_to_local_pfn(unsigned long mfn)
{
unsigned long pfn;
#define virt_to_mfn(v) (pfn_to_mfn(virt_to_pfn(v)))
#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
+/* VIRT <-> GUEST conversion */
+#define virt_to_gfn(v) (pfn_to_gfn(virt_to_pfn(v)))
+#define gfn_to_virt(g) (__va(gfn_to_pfn(g) << PAGE_SHIFT))
+
static inline unsigned long pte_mfn(pte_t pte)
{
return (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
static inline bool xen_arch_need_swiotlb(struct device *dev,
unsigned long pfn,
- unsigned long mfn)
+ unsigned long bfn)
{
return false;
}
u16 sel;
la = seg_base(ctxt, addr.seg) + addr.ea;
+ *linear = la;
*max_size = 0;
switch (mode) {
case X86EMUL_MODE_PROT64:
}
if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
return emulate_gp(ctxt, 0);
- *linear = la;
return X86EMUL_CONTINUE;
bad:
if (addr.seg == VCPU_SREG_SS)
walk_shadow_page_lockless_begin(vcpu);
- for (shadow_walk_init(&iterator, vcpu, addr), root = iterator.level;
+ for (shadow_walk_init(&iterator, vcpu, addr),
+ leaf = root = iterator.level;
shadow_walk_okay(&iterator);
__shadow_walk_next(&iterator, spte)) {
- leaf = iterator.level;
spte = mmu_spte_get_lockless(iterator.sptep);
sptes[leaf - 1] = spte;
+ leaf--;
if (!is_shadow_present_pte(spte))
break;
if (reserved) {
pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
__func__, addr);
- while (root >= leaf) {
+ while (root > leaf) {
pr_err("------ spte 0x%llx level %d.\n",
sptes[root - 1], root);
root--;
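Worked example of the index bookkeeping this hunk fixes, for a four-level walk (root = 4): the first iteration stores sptes[3] and drops leaf to 3, the second stores sptes[2] and drops leaf to 2, and so on. If the walk stops after level 3, the dump loop with "root > leaf" prints exactly sptes[3] and sptes[2], the slots that were written; the old "root >= leaf" bound read one slot too many, and leaf was never initialized at all when the walk body did not run.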
put_smstate(u32, buf, offset, process_smi_get_segment_flags(&seg));
}
+#ifdef CONFIG_X86_64
static void process_smi_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
{
struct kvm_segment seg;
put_smstate(u32, buf, offset + 4, seg.limit);
put_smstate(u64, buf, offset + 8, seg.base);
}
+#endif
static void process_smi_save_state_32(struct kvm_vcpu *vcpu, char *buf)
{
return 0;
}
-static int do_remap_mfn(struct vm_area_struct *vma,
+static int do_remap_gfn(struct vm_area_struct *vma,
unsigned long addr,
- xen_pfn_t *mfn, int nr,
+ xen_pfn_t *gfn, int nr,
int *err_ptr, pgprot_t prot,
unsigned domid,
struct page **pages)
if (xen_feature(XENFEAT_auto_translated_physmap)) {
#ifdef CONFIG_XEN_PVH
/* We need to update the local page tables and the xen HAP */
- return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr,
+ return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
prot, domid, pages);
#else
return -EINVAL;
#endif
}
- rmd.mfn = mfn;
+ rmd.mfn = gfn;
rmd.prot = prot;
/* We use the err_ptr to indicate whether we are doing a contiguous
* mapping or a discontiguous mapping. */
batch_left, &done, domid);
/*
- * @err_ptr may be the same buffer as @mfn, so
- * only clear it after each chunk of @mfn is
+ * @err_ptr may be the same buffer as @gfn, so
+ * only clear it after each chunk of @gfn is
* used.
*/
if (err_ptr) {
return err < 0 ? err : mapped;
}
-int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
unsigned long addr,
- xen_pfn_t mfn, int nr,
+ xen_pfn_t gfn, int nr,
pgprot_t prot, unsigned domid,
struct page **pages)
{
- return do_remap_mfn(vma, addr, &mfn, nr, NULL, prot, domid, pages);
+ return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
}
-EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
+EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
-int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
unsigned long addr,
- xen_pfn_t *mfn, int nr,
+ xen_pfn_t *gfn, int nr,
int *err_ptr, pgprot_t prot,
unsigned domid, struct page **pages)
{
* cause of "wrong memory was mapped in".
*/
BUG_ON(err_ptr == NULL);
- return do_remap_mfn(vma, addr, mfn, nr, err_ptr, prot, domid, pages);
+ return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
}
-EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
+EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
/* Returns: 0 success */
-int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
int numpgs, struct page **pages)
{
if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
return -EINVAL;
#endif
}
-EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
+EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
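A hedged caller-side sketch of the renamed API (the signature is the one exported above; vma, gfn, nr and domid would come from the caller, e.g. privcmd's mmap path). Per do_remap_gfn() above, a negative return is an errno and a non-negative return is the number of frames mapped:

	err = xen_remap_domain_gfn_range(vma, vma->vm_start, gfn, nr,
					 vma->vm_page_prot, domid, pages);
	if (err < 0)
		return err;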
}
#endif
ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
- ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
+ ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir));
if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
BUG();
*/
void *device_get_mac_address(struct device *dev, char *addr, int alen)
{
- addr = device_get_mac_addr(dev, "mac-address", addr, alen);
- if (addr)
- return addr;
+ char *res;
- addr = device_get_mac_addr(dev, "local-mac-address", addr, alen);
- if (addr)
- return addr;
+ res = device_get_mac_addr(dev, "mac-address", addr, alen);
+ if (res)
+ return res;
+
+ res = device_get_mac_addr(dev, "local-mac-address", addr, alen);
+ if (res)
+ return res;
return device_get_mac_addr(dev, "address", addr, alen);
}
struct virtio_blk_config, wce,
&writeback);
if (err)
- writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE) ||
- virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
+ writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE);
return writeback;
}
vblk->disk->private_data = vblk;
vblk->disk->fops = &virtblk_fops;
vblk->disk->driverfs_dev = &vdev->dev;
+ vblk->disk->flags |= GENHD_FL_EXT_DEVT;
vblk->index = index;
/* configure queue flush support */
static unsigned int features[] = {
VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
- VIRTIO_BLK_F_TOPOLOGY,
+ VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ,
};
struct blkfront_info *info)
{
struct grant *gnt_list_entry;
- unsigned long buffer_mfn;
+ unsigned long buffer_gfn;
BUG_ON(list_empty(&info->grants));
gnt_list_entry = list_first_entry(&info->grants, struct grant,
BUG_ON(!pfn);
gnt_list_entry->pfn = pfn;
}
- buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
+ buffer_gfn = pfn_to_gfn(gnt_list_entry->pfn);
gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
info->xbdev->otherend_id,
- buffer_mfn, 0);
+ buffer_gfn, 0);
return gnt_list_entry;
}
{ 0 },
};
+static const struct exynos_cpuclk_cfg_data e4212_armclk_d[] __initconst = {
+ { 1500000, E4210_CPU_DIV0(2, 1, 6, 0, 7, 3), E4210_CPU_DIV1(2, 6), },
+ { 1400000, E4210_CPU_DIV0(2, 1, 6, 0, 7, 3), E4210_CPU_DIV1(2, 6), },
+ { 1300000, E4210_CPU_DIV0(2, 1, 5, 0, 7, 3), E4210_CPU_DIV1(2, 5), },
+ { 1200000, E4210_CPU_DIV0(2, 1, 5, 0, 7, 3), E4210_CPU_DIV1(2, 5), },
+ { 1100000, E4210_CPU_DIV0(2, 1, 4, 0, 6, 3), E4210_CPU_DIV1(2, 4), },
+ { 1000000, E4210_CPU_DIV0(1, 1, 4, 0, 5, 2), E4210_CPU_DIV1(2, 4), },
+ { 900000, E4210_CPU_DIV0(1, 1, 3, 0, 5, 2), E4210_CPU_DIV1(2, 3), },
+ { 800000, E4210_CPU_DIV0(1, 1, 3, 0, 5, 2), E4210_CPU_DIV1(2, 3), },
+ { 700000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
+ { 600000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
+ { 500000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
+ { 400000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
+ { 300000, E4210_CPU_DIV0(1, 1, 2, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
+ { 200000, E4210_CPU_DIV0(1, 1, 1, 0, 3, 1), E4210_CPU_DIV1(2, 3), },
+ { 0 },
+};
+
+#define E4412_CPU_DIV1(cores, hpm, copy) \
+ (((cores) << 8) | ((hpm) << 4) | ((copy) << 0))
+
+static const struct exynos_cpuclk_cfg_data e4412_armclk_d[] __initconst = {
+ { 1500000, E4210_CPU_DIV0(2, 1, 6, 0, 7, 3), E4412_CPU_DIV1(7, 0, 6), },
+ { 1400000, E4210_CPU_DIV0(2, 1, 6, 0, 7, 3), E4412_CPU_DIV1(6, 0, 6), },
+ { 1300000, E4210_CPU_DIV0(2, 1, 5, 0, 7, 3), E4412_CPU_DIV1(6, 0, 5), },
+ { 1200000, E4210_CPU_DIV0(2, 1, 5, 0, 7, 3), E4412_CPU_DIV1(5, 0, 5), },
+ { 1100000, E4210_CPU_DIV0(2, 1, 4, 0, 6, 3), E4412_CPU_DIV1(5, 0, 4), },
+ { 1000000, E4210_CPU_DIV0(1, 1, 4, 0, 5, 2), E4412_CPU_DIV1(4, 0, 4), },
+ { 900000, E4210_CPU_DIV0(1, 1, 3, 0, 5, 2), E4412_CPU_DIV1(4, 0, 3), },
+ { 800000, E4210_CPU_DIV0(1, 1, 3, 0, 5, 2), E4412_CPU_DIV1(3, 0, 3), },
+ { 700000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4412_CPU_DIV1(3, 0, 3), },
+ { 600000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4412_CPU_DIV1(2, 0, 3), },
+ { 500000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4412_CPU_DIV1(2, 0, 3), },
+ { 400000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4412_CPU_DIV1(1, 0, 3), },
+ { 300000, E4210_CPU_DIV0(1, 1, 2, 0, 4, 2), E4412_CPU_DIV1(1, 0, 3), },
+ { 200000, E4210_CPU_DIV0(1, 1, 1, 0, 3, 1), E4412_CPU_DIV1(0, 0, 3), },
+ { 0 },
+};
+
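Worked example of the E4412_CPU_DIV1() packing used in the table above: E4412_CPU_DIV1(7, 0, 6) = (7 << 8) | (0 << 4) | (6 << 0) = 0x706, i.e. CORES divider 7, HPM divider 0 and COPY divider 6 for the 1.5 GHz operating point.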
/* register exynos4 clocks */
static void __init exynos4_clk_init(struct device_node *np,
enum exynos4_soc soc)
samsung_clk_register_fixed_factor(ctx,
exynos4x12_fixed_factor_clks,
ARRAY_SIZE(exynos4x12_fixed_factor_clks));
+ if (of_machine_is_compatible("samsung,exynos4412")) {
+ exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
+ mout_core_p4x12[0], mout_core_p4x12[1], 0x14200,
+ e4412_armclk_d, ARRAY_SIZE(e4412_armclk_d),
+ CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
+ } else {
+ exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
+ mout_core_p4x12[0], mout_core_p4x12[1], 0x14200,
+ e4212_armclk_d, ARRAY_SIZE(e4212_armclk_d),
+ CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
+ }
}
samsung_clk_register_alias(ctx, exynos4_aliases,
This add the CPUfreq driver support for Versatile Express
big.LITTLE platforms using SPC for power management.
-
-config ARM_EXYNOS_CPUFREQ
- tristate "SAMSUNG EXYNOS CPUfreq Driver"
- depends on CPU_EXYNOS4210 || SOC_EXYNOS4212 || SOC_EXYNOS4412 || SOC_EXYNOS5250
- depends on THERMAL
- help
- This adds the CPUFreq driver for Samsung EXYNOS platforms.
- Supported SoC versions are:
- Exynos4210, Exynos4212, Exynos4412, and Exynos5250.
-
- If in doubt, say N.
-
-config ARM_EXYNOS4X12_CPUFREQ
- bool "SAMSUNG EXYNOS4x12"
- depends on SOC_EXYNOS4212 || SOC_EXYNOS4412
- depends on ARM_EXYNOS_CPUFREQ
- default y
- help
- This adds the CPUFreq driver for Samsung EXYNOS4X12
- SoC (EXYNOS4212 or EXYNOS4412).
-
- If in doubt, say N.
-
-config ARM_EXYNOS5250_CPUFREQ
- bool "SAMSUNG EXYNOS5250"
- depends on SOC_EXYNOS5250
- depends on ARM_EXYNOS_CPUFREQ
- default y
- help
- This adds the CPUFreq driver for Samsung EXYNOS5250
- SoC.
-
- If in doubt, say N.
-
-config ARM_EXYNOS_CPU_FREQ_BOOST_SW
- bool "EXYNOS Frequency Overclocking - Software"
- depends on ARM_EXYNOS_CPUFREQ && THERMAL
- select CPU_FREQ_BOOST_SW
- select EXYNOS_THERMAL
- help
- This driver supports software managed overclocking (BOOST).
- It allows usage of special frequencies for Samsung Exynos
- processors if thermal conditions are appropriate.
-
- It requires, for safe operation, thermal framework with properly
- defined trip points.
-
- If in doubt, say N.
-
config ARM_EXYNOS5440_CPUFREQ
tristate "SAMSUNG EXYNOS5440"
depends on SOC_EXYNOS5440
obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o
-obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += arm-exynos-cpufreq.o
-arm-exynos-cpufreq-y := exynos-cpufreq.o
-arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o
-arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o
obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
obj-$(CONFIG_ARM_HISI_ACPU_CPUFREQ) += hisi-acpu-cpufreq.o
+++ /dev/null
-/*
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * EXYNOS - CPU frequency scaling support for EXYNOS series
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/regulator/consumer.h>
-#include <linux/cpufreq.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/cpu_cooling.h>
-#include <linux/cpu.h>
-
-#include "exynos-cpufreq.h"
-
-static struct exynos_dvfs_info *exynos_info;
-static struct thermal_cooling_device *cdev;
-static struct regulator *arm_regulator;
-static unsigned int locking_frequency;
-
-static int exynos_cpufreq_get_index(unsigned int freq)
-{
- struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
- struct cpufreq_frequency_table *pos;
-
- cpufreq_for_each_entry(pos, freq_table)
- if (pos->frequency == freq)
- break;
-
- if (pos->frequency == CPUFREQ_TABLE_END)
- return -EINVAL;
-
- return pos - freq_table;
-}
-
-static int exynos_cpufreq_scale(unsigned int target_freq)
-{
- struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
- unsigned int *volt_table = exynos_info->volt_table;
- struct cpufreq_policy *policy = cpufreq_cpu_get(0);
- unsigned int arm_volt, safe_arm_volt = 0;
- unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz;
- struct device *dev = exynos_info->dev;
- unsigned int old_freq;
- int index, old_index;
- int ret = 0;
-
- old_freq = policy->cur;
-
- /*
- * The policy max have been changed so that we cannot get proper
- * old_index with cpufreq_frequency_table_target(). Thus, ignore
- * policy and get the index from the raw frequency table.
- */
- old_index = exynos_cpufreq_get_index(old_freq);
- if (old_index < 0) {
- ret = old_index;
- goto out;
- }
-
- index = exynos_cpufreq_get_index(target_freq);
- if (index < 0) {
- ret = index;
- goto out;
- }
-
- /*
- * ARM clock source will be changed APLL to MPLL temporary
- * To support this level, need to control regulator for
- * required voltage level
- */
- if (exynos_info->need_apll_change != NULL) {
- if (exynos_info->need_apll_change(old_index, index) &&
- (freq_table[index].frequency < mpll_freq_khz) &&
- (freq_table[old_index].frequency < mpll_freq_khz))
- safe_arm_volt = volt_table[exynos_info->pll_safe_idx];
- }
- arm_volt = volt_table[index];
-
- /* When the new frequency is higher than current frequency */
- if ((target_freq > old_freq) && !safe_arm_volt) {
- /* Firstly, voltage up to increase frequency */
- ret = regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
- if (ret) {
- dev_err(dev, "failed to set cpu voltage to %d\n",
- arm_volt);
- return ret;
- }
- }
-
- if (safe_arm_volt) {
- ret = regulator_set_voltage(arm_regulator, safe_arm_volt,
- safe_arm_volt);
- if (ret) {
- dev_err(dev, "failed to set cpu voltage to %d\n",
- safe_arm_volt);
- return ret;
- }
- }
-
- exynos_info->set_freq(old_index, index);
-
- /* When the new frequency is lower than current frequency */
- if ((target_freq < old_freq) ||
- ((target_freq > old_freq) && safe_arm_volt)) {
- /* down the voltage after frequency change */
- ret = regulator_set_voltage(arm_regulator, arm_volt,
- arm_volt);
- if (ret) {
- dev_err(dev, "failed to set cpu voltage to %d\n",
- arm_volt);
- goto out;
- }
- }
-
-out:
- cpufreq_cpu_put(policy);
-
- return ret;
-}
-
-static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
-{
- return exynos_cpufreq_scale(exynos_info->freq_table[index].frequency);
-}
-
-static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
-{
- policy->clk = exynos_info->cpu_clk;
- policy->suspend_freq = locking_frequency;
- return cpufreq_generic_init(policy, exynos_info->freq_table, 100000);
-}
-
-static struct cpufreq_driver exynos_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = exynos_target,
- .get = cpufreq_generic_get,
- .init = exynos_cpufreq_cpu_init,
- .name = "exynos_cpufreq",
- .attr = cpufreq_generic_attr,
-#ifdef CONFIG_ARM_EXYNOS_CPU_FREQ_BOOST_SW
- .boost_supported = true,
-#endif
-#ifdef CONFIG_PM
- .suspend = cpufreq_generic_suspend,
-#endif
-};
-
-static int exynos_cpufreq_probe(struct platform_device *pdev)
-{
- struct device_node *cpu0;
- int ret = -EINVAL;
-
- exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL);
- if (!exynos_info)
- return -ENOMEM;
-
- exynos_info->dev = &pdev->dev;
-
- if (of_machine_is_compatible("samsung,exynos4212")) {
- exynos_info->type = EXYNOS_SOC_4212;
- ret = exynos4x12_cpufreq_init(exynos_info);
- } else if (of_machine_is_compatible("samsung,exynos4412")) {
- exynos_info->type = EXYNOS_SOC_4412;
- ret = exynos4x12_cpufreq_init(exynos_info);
- } else if (of_machine_is_compatible("samsung,exynos5250")) {
- exynos_info->type = EXYNOS_SOC_5250;
- ret = exynos5250_cpufreq_init(exynos_info);
- } else {
- pr_err("%s: Unknown SoC type\n", __func__);
- ret = -ENODEV;
- }
-
- if (ret)
- goto err_vdd_arm;
-
- if (exynos_info->set_freq == NULL) {
- dev_err(&pdev->dev, "No set_freq function (ERR)\n");
- ret = -EINVAL;
- goto err_vdd_arm;
- }
-
- arm_regulator = regulator_get(NULL, "vdd_arm");
- if (IS_ERR(arm_regulator)) {
- dev_err(&pdev->dev, "failed to get resource vdd_arm\n");
- ret = -EINVAL;
- goto err_vdd_arm;
- }
-
- /* Done here as we want to capture boot frequency */
- locking_frequency = clk_get_rate(exynos_info->cpu_clk) / 1000;
-
- ret = cpufreq_register_driver(&exynos_driver);
- if (ret)
- goto err_cpufreq_reg;
-
- cpu0 = of_get_cpu_node(0, NULL);
- if (!cpu0) {
- pr_err("failed to find cpu0 node\n");
- return 0;
- }
-
- if (of_find_property(cpu0, "#cooling-cells", NULL)) {
- cdev = of_cpufreq_cooling_register(cpu0,
- cpu_present_mask);
- if (IS_ERR(cdev))
- pr_err("running cpufreq without cooling device: %ld\n",
- PTR_ERR(cdev));
- }
-
- return 0;
-
-err_cpufreq_reg:
- dev_err(&pdev->dev, "failed to register cpufreq driver\n");
- regulator_put(arm_regulator);
-err_vdd_arm:
- kfree(exynos_info);
- return ret;
-}
-
-static struct platform_driver exynos_cpufreq_platdrv = {
- .driver = {
- .name = "exynos-cpufreq",
- },
- .probe = exynos_cpufreq_probe,
-};
-module_platform_driver(exynos_cpufreq_platdrv);
+++ /dev/null
-/*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * EXYNOS - CPUFreq support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-enum cpufreq_level_index {
- L0, L1, L2, L3, L4,
- L5, L6, L7, L8, L9,
- L10, L11, L12, L13, L14,
- L15, L16, L17, L18, L19,
- L20,
-};
-
-enum exynos_soc_type {
- EXYNOS_SOC_4212,
- EXYNOS_SOC_4412,
- EXYNOS_SOC_5250,
-};
-
-#define APLL_FREQ(f, a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, m, p, s) \
- { \
- .freq = (f) * 1000, \
- .clk_div_cpu0 = ((a0) | (a1) << 4 | (a2) << 8 | (a3) << 12 | \
- (a4) << 16 | (a5) << 20 | (a6) << 24 | (a7) << 28), \
- .clk_div_cpu1 = (b0 << 0 | b1 << 4 | b2 << 8), \
- .mps = ((m) << 16 | (p) << 8 | (s)), \
- }
-
-struct apll_freq {
- unsigned int freq;
- u32 clk_div_cpu0;
- u32 clk_div_cpu1;
- u32 mps;
-};
-
-struct exynos_dvfs_info {
- enum exynos_soc_type type;
- struct device *dev;
- unsigned long mpll_freq_khz;
- unsigned int pll_safe_idx;
- struct clk *cpu_clk;
- unsigned int *volt_table;
- struct cpufreq_frequency_table *freq_table;
- void (*set_freq)(unsigned int, unsigned int);
- bool (*need_apll_change)(unsigned int, unsigned int);
- void __iomem *cmu_regs;
-};
-
-#ifdef CONFIG_ARM_EXYNOS4X12_CPUFREQ
-extern int exynos4x12_cpufreq_init(struct exynos_dvfs_info *);
-#else
-static inline int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
-{
- return -EOPNOTSUPP;
-}
-#endif
-#ifdef CONFIG_ARM_EXYNOS5250_CPUFREQ
-extern int exynos5250_cpufreq_init(struct exynos_dvfs_info *);
-#else
-static inline int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
-{
- return -EOPNOTSUPP;
-}
-#endif
-
-#define EXYNOS4_CLKSRC_CPU 0x14200
-#define EXYNOS4_CLKMUX_STATCPU 0x14400
-
-#define EXYNOS4_CLKDIV_CPU 0x14500
-#define EXYNOS4_CLKDIV_CPU1 0x14504
-#define EXYNOS4_CLKDIV_STATCPU 0x14600
-#define EXYNOS4_CLKDIV_STATCPU1 0x14604
-
-#define EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT (16)
-#define EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK (0x7 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)
-
-#define EXYNOS5_APLL_LOCK 0x00000
-#define EXYNOS5_APLL_CON0 0x00100
-#define EXYNOS5_CLKMUX_STATCPU 0x00400
-#define EXYNOS5_CLKDIV_CPU0 0x00500
-#define EXYNOS5_CLKDIV_CPU1 0x00504
-#define EXYNOS5_CLKDIV_STATCPU0 0x00600
-#define EXYNOS5_CLKDIV_STATCPU1 0x00604
+++ /dev/null
-/*
- * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * EXYNOS4X12 - CPU frequency scaling support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/cpufreq.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-
-#include "exynos-cpufreq.h"
-
-static struct clk *cpu_clk;
-static struct clk *moutcore;
-static struct clk *mout_mpll;
-static struct clk *mout_apll;
-static struct exynos_dvfs_info *cpufreq;
-
-static unsigned int exynos4x12_volt_table[] = {
- 1350000, 1287500, 1250000, 1187500, 1137500, 1087500, 1037500,
- 1000000, 987500, 975000, 950000, 925000, 900000, 900000
-};
-
-static struct cpufreq_frequency_table exynos4x12_freq_table[] = {
- {CPUFREQ_BOOST_FREQ, L0, 1500 * 1000},
- {0, L1, 1400 * 1000},
- {0, L2, 1300 * 1000},
- {0, L3, 1200 * 1000},
- {0, L4, 1100 * 1000},
- {0, L5, 1000 * 1000},
- {0, L6, 900 * 1000},
- {0, L7, 800 * 1000},
- {0, L8, 700 * 1000},
- {0, L9, 600 * 1000},
- {0, L10, 500 * 1000},
- {0, L11, 400 * 1000},
- {0, L12, 300 * 1000},
- {0, L13, 200 * 1000},
- {0, 0, CPUFREQ_TABLE_END},
-};
-
-static struct apll_freq *apll_freq_4x12;
-
-static struct apll_freq apll_freq_4212[] = {
- /*
- * values:
- * freq
- * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, CORE2
- * clock divider for COPY, HPM, RESERVED
- * PLL M, P, S
- */
- APLL_FREQ(1500, 0, 3, 7, 0, 6, 1, 2, 0, 6, 2, 0, 250, 4, 0),
- APLL_FREQ(1400, 0, 3, 7, 0, 6, 1, 2, 0, 6, 2, 0, 175, 3, 0),
- APLL_FREQ(1300, 0, 3, 7, 0, 5, 1, 2, 0, 5, 2, 0, 325, 6, 0),
- APLL_FREQ(1200, 0, 3, 7, 0, 5, 1, 2, 0, 5, 2, 0, 200, 4, 0),
- APLL_FREQ(1100, 0, 3, 6, 0, 4, 1, 2, 0, 4, 2, 0, 275, 6, 0),
- APLL_FREQ(1000, 0, 2, 5, 0, 4, 1, 1, 0, 4, 2, 0, 125, 3, 0),
- APLL_FREQ(900, 0, 2, 5, 0, 3, 1, 1, 0, 3, 2, 0, 150, 4, 0),
- APLL_FREQ(800, 0, 2, 5, 0, 3, 1, 1, 0, 3, 2, 0, 100, 3, 0),
- APLL_FREQ(700, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 175, 3, 1),
- APLL_FREQ(600, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 200, 4, 1),
- APLL_FREQ(500, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 125, 3, 1),
- APLL_FREQ(400, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 100, 3, 1),
- APLL_FREQ(300, 0, 2, 4, 0, 2, 1, 1, 0, 3, 2, 0, 200, 4, 2),
- APLL_FREQ(200, 0, 1, 3, 0, 1, 1, 1, 0, 3, 2, 0, 100, 3, 2),
-};
-
-static struct apll_freq apll_freq_4412[] = {
- /*
- * values:
- * freq
- * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, CORE2
- * clock divider for COPY, HPM, CORES
- * PLL M, P, S
- */
- APLL_FREQ(1500, 0, 3, 7, 0, 6, 1, 2, 0, 6, 0, 7, 250, 4, 0),
- APLL_FREQ(1400, 0, 3, 7, 0, 6, 1, 2, 0, 6, 0, 6, 175, 3, 0),
- APLL_FREQ(1300, 0, 3, 7, 0, 5, 1, 2, 0, 5, 0, 6, 325, 6, 0),
- APLL_FREQ(1200, 0, 3, 7, 0, 5, 1, 2, 0, 5, 0, 5, 200, 4, 0),
- APLL_FREQ(1100, 0, 3, 6, 0, 4, 1, 2, 0, 4, 0, 5, 275, 6, 0),
- APLL_FREQ(1000, 0, 2, 5, 0, 4, 1, 1, 0, 4, 0, 4, 125, 3, 0),
- APLL_FREQ(900, 0, 2, 5, 0, 3, 1, 1, 0, 3, 0, 4, 150, 4, 0),
- APLL_FREQ(800, 0, 2, 5, 0, 3, 1, 1, 0, 3, 0, 3, 100, 3, 0),
- APLL_FREQ(700, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 3, 175, 3, 1),
- APLL_FREQ(600, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 2, 200, 4, 1),
- APLL_FREQ(500, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 2, 125, 3, 1),
- APLL_FREQ(400, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 1, 100, 3, 1),
- APLL_FREQ(300, 0, 2, 4, 0, 2, 1, 1, 0, 3, 0, 1, 200, 4, 2),
- APLL_FREQ(200, 0, 1, 3, 0, 1, 1, 1, 0, 3, 0, 0, 100, 3, 2),
-};
-
-static void exynos4x12_set_clkdiv(unsigned int div_index)
-{
- unsigned int tmp;
-
- /* Change Divider - CPU0 */
-
- tmp = apll_freq_4x12[div_index].clk_div_cpu0;
-
- __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU);
-
- while (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU)
- & 0x11111111)
- cpu_relax();
-
- /* Change Divider - CPU1 */
- tmp = apll_freq_4x12[div_index].clk_div_cpu1;
-
- __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU1);
-
- do {
- cpu_relax();
- tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU1);
- } while (tmp != 0x0);
-}
-
-static void exynos4x12_set_apll(unsigned int index)
-{
- unsigned int tmp, freq = apll_freq_4x12[index].freq;
-
- /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
- clk_set_parent(moutcore, mout_mpll);
-
- do {
- cpu_relax();
- tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU)
- >> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT);
- tmp &= 0x7;
- } while (tmp != 0x2);
-
- clk_set_rate(mout_apll, freq * 1000);
-
- /* MUX_CORE_SEL = APLL */
- clk_set_parent(moutcore, mout_apll);
-
- do {
- cpu_relax();
- tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU);
- tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK;
- } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
-}
-
-static void exynos4x12_set_frequency(unsigned int old_index,
- unsigned int new_index)
-{
- if (old_index > new_index) {
- exynos4x12_set_clkdiv(new_index);
- exynos4x12_set_apll(new_index);
- } else if (old_index < new_index) {
- exynos4x12_set_apll(new_index);
- exynos4x12_set_clkdiv(new_index);
- }
-}
-
-int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
-{
- struct device_node *np;
- unsigned long rate;
-
- /*
- * HACK: This is a temporary workaround to get access to clock
- * controller registers directly and remove static mappings and
- * dependencies on platform headers. It is necessary to enable
- * Exynos multi-platform support and will be removed together with
- * this whole driver as soon as Exynos gets migrated to use
- * cpufreq-dt driver.
- */
- np = of_find_compatible_node(NULL, NULL, "samsung,exynos4412-clock");
- if (!np) {
- pr_err("%s: failed to find clock controller DT node\n",
- __func__);
- return -ENODEV;
- }
-
- info->cmu_regs = of_iomap(np, 0);
- if (!info->cmu_regs) {
- pr_err("%s: failed to map CMU registers\n", __func__);
- return -EFAULT;
- }
-
- cpu_clk = clk_get(NULL, "armclk");
- if (IS_ERR(cpu_clk))
- return PTR_ERR(cpu_clk);
-
- moutcore = clk_get(NULL, "moutcore");
- if (IS_ERR(moutcore))
- goto err_moutcore;
-
- mout_mpll = clk_get(NULL, "mout_mpll");
- if (IS_ERR(mout_mpll))
- goto err_mout_mpll;
-
- rate = clk_get_rate(mout_mpll) / 1000;
-
- mout_apll = clk_get(NULL, "mout_apll");
- if (IS_ERR(mout_apll))
- goto err_mout_apll;
-
- if (info->type == EXYNOS_SOC_4212)
- apll_freq_4x12 = apll_freq_4212;
- else
- apll_freq_4x12 = apll_freq_4412;
-
- info->mpll_freq_khz = rate;
- /* 800Mhz */
- info->pll_safe_idx = L7;
- info->cpu_clk = cpu_clk;
- info->volt_table = exynos4x12_volt_table;
- info->freq_table = exynos4x12_freq_table;
- info->set_freq = exynos4x12_set_frequency;
-
- cpufreq = info;
-
- return 0;
-
-err_mout_apll:
- clk_put(mout_mpll);
-err_mout_mpll:
- clk_put(moutcore);
-err_moutcore:
- clk_put(cpu_clk);
-
- pr_debug("%s: failed initialization\n", __func__);
- return -EINVAL;
-}
+++ /dev/null
-/*
- * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * EXYNOS5250 - CPU frequency scaling support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/cpufreq.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-
-#include "exynos-cpufreq.h"
-
-static struct clk *cpu_clk;
-static struct clk *moutcore;
-static struct clk *mout_mpll;
-static struct clk *mout_apll;
-static struct exynos_dvfs_info *cpufreq;
-
-static unsigned int exynos5250_volt_table[] = {
- 1300000, 1250000, 1225000, 1200000, 1150000,
- 1125000, 1100000, 1075000, 1050000, 1025000,
- 1012500, 1000000, 975000, 950000, 937500,
- 925000
-};
-
-static struct cpufreq_frequency_table exynos5250_freq_table[] = {
- {0, L0, 1700 * 1000},
- {0, L1, 1600 * 1000},
- {0, L2, 1500 * 1000},
- {0, L3, 1400 * 1000},
- {0, L4, 1300 * 1000},
- {0, L5, 1200 * 1000},
- {0, L6, 1100 * 1000},
- {0, L7, 1000 * 1000},
- {0, L8, 900 * 1000},
- {0, L9, 800 * 1000},
- {0, L10, 700 * 1000},
- {0, L11, 600 * 1000},
- {0, L12, 500 * 1000},
- {0, L13, 400 * 1000},
- {0, L14, 300 * 1000},
- {0, L15, 200 * 1000},
- {0, 0, CPUFREQ_TABLE_END},
-};
-
-static struct apll_freq apll_freq_5250[] = {
- /*
- * values:
- * freq
- * clock divider for ARM, CPUD, ACP, PERIPH, ATB, PCLK_DBG, APLL, ARM2
- * clock divider for COPY, HPM, RESERVED
- * PLL M, P, S
- */
- APLL_FREQ(1700, 0, 3, 7, 7, 7, 3, 5, 0, 0, 2, 0, 425, 6, 0),
- APLL_FREQ(1600, 0, 3, 7, 7, 7, 1, 4, 0, 0, 2, 0, 200, 3, 0),
- APLL_FREQ(1500, 0, 2, 7, 7, 7, 1, 4, 0, 0, 2, 0, 250, 4, 0),
- APLL_FREQ(1400, 0, 2, 7, 7, 6, 1, 4, 0, 0, 2, 0, 175, 3, 0),
- APLL_FREQ(1300, 0, 2, 7, 7, 6, 1, 3, 0, 0, 2, 0, 325, 6, 0),
- APLL_FREQ(1200, 0, 2, 7, 7, 5, 1, 3, 0, 0, 2, 0, 200, 4, 0),
- APLL_FREQ(1100, 0, 3, 7, 7, 5, 1, 3, 0, 0, 2, 0, 275, 6, 0),
- APLL_FREQ(1000, 0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 125, 3, 0),
- APLL_FREQ(900, 0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 150, 4, 0),
- APLL_FREQ(800, 0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 100, 3, 0),
- APLL_FREQ(700, 0, 1, 7, 7, 3, 1, 1, 0, 0, 2, 0, 175, 3, 1),
- APLL_FREQ(600, 0, 1, 7, 7, 3, 1, 1, 0, 0, 2, 0, 200, 4, 1),
- APLL_FREQ(500, 0, 1, 7, 7, 2, 1, 1, 0, 0, 2, 0, 125, 3, 1),
- APLL_FREQ(400, 0, 1, 7, 7, 2, 1, 1, 0, 0, 2, 0, 100, 3, 1),
- APLL_FREQ(300, 0, 1, 7, 7, 1, 1, 1, 0, 0, 2, 0, 200, 4, 2),
- APLL_FREQ(200, 0, 1, 7, 7, 1, 1, 1, 0, 0, 2, 0, 100, 3, 2),
-};
-
-static void set_clkdiv(unsigned int div_index)
-{
- unsigned int tmp;
-
- /* Change Divider - CPU0 */
-
- tmp = apll_freq_5250[div_index].clk_div_cpu0;
-
- __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS5_CLKDIV_CPU0);
-
- while (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKDIV_STATCPU0)
- & 0x11111111)
- cpu_relax();
-
- /* Change Divider - CPU1 */
- tmp = apll_freq_5250[div_index].clk_div_cpu1;
-
- __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS5_CLKDIV_CPU1);
-
- while (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKDIV_STATCPU1) & 0x11)
- cpu_relax();
-}
-
-static void set_apll(unsigned int index)
-{
- unsigned int tmp;
- unsigned int freq = apll_freq_5250[index].freq;
-
- /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
- clk_set_parent(moutcore, mout_mpll);
-
- do {
- cpu_relax();
- tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKMUX_STATCPU)
- >> 16);
- tmp &= 0x7;
- } while (tmp != 0x2);
-
- clk_set_rate(mout_apll, freq * 1000);
-
- /* MUX_CORE_SEL = APLL */
- clk_set_parent(moutcore, mout_apll);
-
- do {
- cpu_relax();
- tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKMUX_STATCPU);
- tmp &= (0x7 << 16);
- } while (tmp != (0x1 << 16));
-}
-
-static void exynos5250_set_frequency(unsigned int old_index,
- unsigned int new_index)
-{
- if (old_index > new_index) {
- set_clkdiv(new_index);
- set_apll(new_index);
- } else if (old_index < new_index) {
- set_apll(new_index);
- set_clkdiv(new_index);
- }
-}
-
-int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
-{
- struct device_node *np;
- unsigned long rate;
-
- /*
- * HACK: This is a temporary workaround to get access to clock
- * controller registers directly and remove static mappings and
- * dependencies on platform headers. It is necessary to enable
- * Exynos multi-platform support and will be removed together with
- * this whole driver as soon as Exynos gets migrated to use
- * cpufreq-dt driver.
- */
- np = of_find_compatible_node(NULL, NULL, "samsung,exynos5250-clock");
- if (!np) {
- pr_err("%s: failed to find clock controller DT node\n",
- __func__);
- return -ENODEV;
- }
-
- info->cmu_regs = of_iomap(np, 0);
- if (!info->cmu_regs) {
- pr_err("%s: failed to map CMU registers\n", __func__);
- return -EFAULT;
- }
-
- cpu_clk = clk_get(NULL, "armclk");
- if (IS_ERR(cpu_clk))
- return PTR_ERR(cpu_clk);
-
- moutcore = clk_get(NULL, "mout_cpu");
- if (IS_ERR(moutcore))
- goto err_moutcore;
-
- mout_mpll = clk_get(NULL, "mout_mpll");
- if (IS_ERR(mout_mpll))
- goto err_mout_mpll;
-
- rate = clk_get_rate(mout_mpll) / 1000;
-
- mout_apll = clk_get(NULL, "mout_apll");
- if (IS_ERR(mout_apll))
- goto err_mout_apll;
-
- info->mpll_freq_khz = rate;
- /* 800Mhz */
- info->pll_safe_idx = L9;
- info->cpu_clk = cpu_clk;
- info->volt_table = exynos5250_volt_table;
- info->freq_table = exynos5250_freq_table;
- info->set_freq = exynos5250_set_frequency;
-
- cpufreq = info;
-
- return 0;
-
-err_mout_apll:
- clk_put(mout_mpll);
-err_mout_mpll:
- clk_put(moutcore);
-err_moutcore:
- clk_put(cpu_clk);
-
- pr_err("%s: failed initialization\n", __func__);
- return -EINVAL;
-}
struct xenbus_transaction xbt;
ret = gnttab_grant_foreign_access(dev->otherend_id,
- virt_to_mfn(info->page), 0);
+ virt_to_gfn(info->page), 0);
if (ret < 0)
return ret;
info->gref = ret;
goto error_irqh;
}
ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
- virt_to_mfn(info->page));
+ virt_to_gfn(info->page));
if (ret)
goto error_xenbus;
ret = xenbus_printf(xbt, dev->nodename, "page-gref", "%u", info->gref);
obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
obj-$(CONFIG_MTD_NAND_ATMEL) += atmel_nand.o
obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o
-obj-$(CONFIG_MTD_NAND_OMAP2) += omap2.o
+omap2_nand-objs := omap2.o
+obj-$(CONFIG_MTD_NAND_OMAP2) += omap2_nand.o
obj-$(CONFIG_MTD_NAND_OMAP_BCH_BUILD) += omap_elm.o
obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
/* Micron */
{ "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
{ "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
{ "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
{ "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
* for the chips listed here (without boot sectors).
*/
{ "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, 0) },
+ { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
{ "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
{ "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
- { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
- { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) },
+ { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
{ "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
{ "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
{ "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
{ "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
{ "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
+ { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K) },
{ "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
{ "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
{
const struct flash_info *id = spi_nor_ids;
- while (id->name[0]) {
+ while (id->name) {
if (!strcmp(name, id->name))
return id;
id++;
core_writel(priv, port, CORE_FAST_AGE_PORT);
reg = core_readl(priv, CORE_FAST_AGE_CTRL);
- reg |= EN_AGE_PORT | FAST_AGE_STR_DONE;
+ reg |= EN_AGE_PORT | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
core_writel(priv, reg, CORE_FAST_AGE_CTRL);
do {
if (!timeout)
return -ETIMEDOUT;
+ core_writel(priv, 0, CORE_FAST_AGE_CTRL);
+
return 0;
}
u32 reg;
reg = core_readl(priv, CORE_G_PCTL_PORT(port));
- cur_hw_state = reg >> G_MISTP_STATE_SHIFT;
+ cur_hw_state = reg & (G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
switch (state) {
case BR_STATE_DISABLED:
}
/* Fast-age ARL entries if we are moving a port from Learning or
- * Forwarding state to Disabled, Blocking or Listening state
+ * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
+ * state (hw_state)
*/
if (cur_hw_state != hw_state) {
- if (cur_hw_state & 4 && !(hw_state & 4)) {
+ if (cur_hw_state >= G_MISTP_LEARN_STATE &&
+ hw_state <= G_MISTP_LISTEN_STATE) {
ret = bcm_sf2_sw_fast_age_port(ds, port);
if (ret) {
pr_err("%s: fast-ageing failed\n", __func__);
spin_unlock(&priv->indir_lock); \
return (u64)indir << 32 | dir; \
} \
-static inline void name##_writeq(struct bcm_sf2_priv *priv, u32 off, \
- u64 val) \
+static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val, \
+ u32 off) \
{ \
spin_lock(&priv->indir_lock); \
reg_writel(priv, upper_32_bits(val), REG_DIR_DATA_WRITE); \
.port_join_bridge = mv88e6xxx_join_bridge,
.port_leave_bridge = mv88e6xxx_leave_bridge,
.port_stp_update = mv88e6xxx_port_stp_update,
+ .port_pvid_get = mv88e6xxx_port_pvid_get,
+ .port_pvid_set = mv88e6xxx_port_pvid_set,
+ .port_vlan_add = mv88e6xxx_port_vlan_add,
+ .port_vlan_del = mv88e6xxx_port_vlan_del,
+ .vlan_getnext = mv88e6xxx_vlan_getnext,
.port_fdb_add = mv88e6xxx_port_fdb_add,
.port_fdb_del = mv88e6xxx_port_fdb_del,
.port_fdb_getnext = mv88e6xxx_port_fdb_getnext,
if (rxcomplete < budget) {
- napi_gro_flush(napi, false);
- __napi_complete(napi);
+ napi_complete(napi);
netdev_dbg(priv->dev,
"NAPI Complete, did %d packets with budget %d\n",
spin_lock_init(&priv->tx_lock);
spin_lock_init(&priv->rxdma_irq_lock);
+ netif_carrier_off(ndev);
ret = register_netdev(ndev);
if (ret) {
dev_err(&pdev->dev, "failed to register TSE net device\n");
INIT_LIST_HEAD(&lio->glist);
for (i = 0; i < lio->tx_qsize; i++) {
- g = kmalloc(sizeof(*g), GFP_KERNEL);
+ g = kzalloc(sizeof(*g), GFP_KERNEL);
if (!g)
break;
- memset(g, 0, sizeof(struct octnic_gather));
g->sg_size =
((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
{
- int ver, chip;
u16 device_id;
/* Retrieve adapter's device ID */
pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
- ver = device_id >> 12;
- switch (ver) {
+
+ switch (device_id >> 12) {
case CHELSIO_T4:
- chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
- break;
+ return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
case CHELSIO_T5:
- chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
- break;
+ return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
case CHELSIO_T6:
- chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
- break;
+ return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
default:
dev_err(&pdev->dev, "Device %d is not supported\n",
device_id);
- return -EINVAL;
}
- return chip;
+ return -EINVAL;
}
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -ENOMEM;
goto out_free_adapter;
}
- t4_write_reg(adapter, SGE_STAT_CFG_A,
- STATSOURCE_T5_V(7) | STATMODE_V(0));
}
setup_memwin(adapter);
if (err)
goto out_unmap_bar;
+ /* configure SGE_STAT_CFG_A to read WC stats */
+ if (!is_t4(adapter->params.chip))
+ t4_write_reg(adapter, SGE_STAT_CFG_A,
+ STATSOURCE_T5_V(7) | STATMODE_V(0));
+
for_each_port(adapter, i) {
struct net_device *netdev;
* message or, if we're doing a Large Send Offload, an LSO CPL message
* with an embedded TX Packet Write CPL message.
*/
- flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
+ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
if (skb_shinfo(skb)->gso_size)
flits += (sizeof(struct fw_eth_tx_pkt_wr) +
sizeof(struct cpl_tx_pkt_lso_core) +
struct fw_ldst_cmd {
__be32 op_to_addrspace;
-#define FW_LDST_CMD_ADDRSPACE_S 0
-#define FW_LDST_CMD_ADDRSPACE_V(x) ((x) << FW_LDST_CMD_ADDRSPACE_S)
__be32 cycles_to_len16;
union fw_ldst {
struct fw_ldst_addrval {
__be16 vctl;
__be16 rval;
} mdio;
+ struct fw_ldst_cim_rq {
+ u8 req_first64[8];
+ u8 req_second64[8];
+ u8 resp_first64[8];
+ u8 resp_second64[8];
+ __be32 r3[2];
+ } cim_rq;
union fw_ldst_mps {
struct fw_ldst_mps_rplc {
__be16 fid_idx;
__be16 nset_pkd;
__be32 data[12];
} pcie;
+ struct fw_ldst_i2c_deprecated {
+ u8 pid_pkd;
+ u8 base;
+ u8 boffset;
+ u8 data;
+ __be32 r9;
+ } i2c_deprecated;
+ struct fw_ldst_i2c {
+ u8 pid;
+ u8 did;
+ u8 boffset;
+ u8 blen;
+ __be32 r9;
+ __u8 data[48];
+ } i2c;
+ struct fw_ldst_le {
+ __be32 index;
+ __be32 r9;
+ u8 val[33];
+ u8 r11[7];
+ } le;
} u;
};
+#define FW_LDST_CMD_ADDRSPACE_S 0
+#define FW_LDST_CMD_ADDRSPACE_V(x) ((x) << FW_LDST_CMD_ADDRSPACE_S)
+
#define FW_LDST_CMD_MSG_S 31
#define FW_LDST_CMD_MSG_V(x) ((x) << FW_LDST_CMD_MSG_S)
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0D
-#define T4FW_VERSION_MICRO 0x20
+#define T4FW_VERSION_MINOR 0x0E
+#define T4FW_VERSION_MICRO 0x04
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
#define T4FW_MIN_VERSION_MICRO 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0D
-#define T5FW_VERSION_MICRO 0x20
+#define T5FW_VERSION_MINOR 0x0E
+#define T5FW_VERSION_MICRO 0x04
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
#define T5FW_MIN_VERSION_MICRO 0x00
#define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x0D
-#define T6FW_VERSION_MICRO 0x2D
+#define T6FW_VERSION_MINOR 0x0E
+#define T6FW_VERSION_MICRO 0x04
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00
if (int_status & ISR_PRS)
dm9000_rx(dev);
- /* Trnasmit Interrupt check */
+ /* Transmit Interrupt check */
if (int_status & ISR_PTS)
dm9000_tx_done(dev, db);
memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
}
- status = be_mcc_notify(adapter);
+ status = be_mcc_notify_wait(adapter);
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
priv->phy_id = pdata->phy_id;
} else {
- priv->phy_id = -1;
-
-#ifdef CONFIG_OF
- {
const uint8_t *mac;
mac = of_get_property(pdev->dev.of_node,
NULL);
if (mac)
memcpy(netdev->dev_addr, mac, IFHWADDRLEN);
- }
-#endif
+ priv->phy_id = -1;
}
/* Check that the given MAC address is valid. If it isn't, read the
struct fec_enet_private *fep = bus->priv;
struct device *dev = &fep->pdev->dev;
unsigned long time_left;
- int ret = 0;
+ int ret;
ret = pm_runtime_get_sync(dev);
if (ret < 0)
return ret;
+ else
+ ret = 0;
fep->mii_timeout = 0;
reinit_completion(&fep->mdio_done);
memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
}
+	/* Check netif status here to avoid a system hang in the following
+	 * case: ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx;
+	 * after ethx is down, all FEC clocks are gated off and a subsequent
+	 * register access would hang the system.
+ */
+ if (!netif_running(ndev))
+ return 0;
+
writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
(ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
fep->hwp + FEC_ADDR_LOW);
atomic_set(&txring->next_to_clean, 0);
atomic_set(&txring->nr_free, jme->tx_ring_size);
- txring->bufinf = kmalloc(sizeof(struct jme_buffer_info) *
+ txring->bufinf = kzalloc(sizeof(struct jme_buffer_info) *
jme->tx_ring_size, GFP_ATOMIC);
if (unlikely(!(txring->bufinf)))
goto err_free_txring;
* Initialize Transmit Descriptors
*/
memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
- memset(txring->bufinf, 0,
- sizeof(struct jme_buffer_info) * jme->tx_ring_size);
return 0;
rxring->next_to_use = 0;
atomic_set(&rxring->next_to_clean, 0);
- rxring->bufinf = kmalloc(sizeof(struct jme_buffer_info) *
+ rxring->bufinf = kzalloc(sizeof(struct jme_buffer_info) *
jme->rx_ring_size, GFP_ATOMIC);
if (unlikely(!(rxring->bufinf)))
goto err_free_rxring;
/*
	 * Initialize Receive Descriptors
*/
- memset(rxring->bufinf, 0,
- sizeof(struct jme_buffer_info) * jme->rx_ring_size);
for (i = 0 ; i < jme->rx_ring_size ; ++i) {
if (unlikely(jme_make_new_rx_buf(jme, i))) {
jme_free_rx_resources(jme);
return;
}
- mc_spec = kmalloc(0x200, GFP_ATOMIC);
+ mc_spec = kzalloc(0x200, GFP_ATOMIC);
if (mc_spec == NULL)
goto oom;
mc_other = mc_spec + (0x100 >> 2);
- memset(mc_spec, 0, 0x100);
- memset(mc_other, 0, 0x100);
-
netdev_for_each_mc_addr(ha, dev) {
u8 *a = ha->addr;
u32 *table;
mbx->req.arg = NULL;
return -ENOMEM;
}
- memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
- memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
temp = adapter->ahw->fw_hal_version << 29;
mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
mbx->cmd_op = type;
mbx->req.arg = NULL;
return -ENOMEM;
}
- memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
- memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
mbx->req.arg[0] = type;
break;
}
mbx->req.arg = NULL;
return -ENOMEM;
}
- memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
- memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
mbx->req.arg[0] = (type | (mbx->req.num << 16) |
(3 << 29));
mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;
unsigned features;
struct mii_if_info mii;
- struct rtl8169_counters counters;
+ dma_addr_t counters_phys_addr;
+ struct rtl8169_counters *counters;
struct rtl8169_tc_offsets tc_offset;
u32 saved_wolopts;
u32 opts1_mask;
}
}
-static struct rtl8169_counters *rtl8169_map_counters(struct net_device *dev,
- dma_addr_t *paddr,
- u32 counter_cmd)
+DECLARE_RTL_COND(rtl_counters_cond)
{
- struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
- struct device *d = &tp->pci_dev->dev;
- struct rtl8169_counters *counters;
- u32 cmd;
- counters = dma_alloc_coherent(d, sizeof(*counters), paddr, GFP_KERNEL);
- if (counters) {
- RTL_W32(CounterAddrHigh, (u64)*paddr >> 32);
- cmd = (u64)*paddr & DMA_BIT_MASK(32);
- RTL_W32(CounterAddrLow, cmd);
- RTL_W32(CounterAddrLow, cmd | counter_cmd);
- }
- return counters;
+ return RTL_R32(CounterAddrLow) & (CounterReset | CounterDump);
}
-static void rtl8169_unmap_counters (struct net_device *dev,
- dma_addr_t paddr,
- struct rtl8169_counters *counters)
+static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
- struct device *d = &tp->pci_dev->dev;
+ dma_addr_t paddr = tp->counters_phys_addr;
+ u32 cmd;
+ bool ret;
- RTL_W32(CounterAddrLow, 0);
- RTL_W32(CounterAddrHigh, 0);
+ RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
+ cmd = (u64)paddr & DMA_BIT_MASK(32);
+ RTL_W32(CounterAddrLow, cmd);
+ RTL_W32(CounterAddrLow, cmd | counter_cmd);
- dma_free_coherent(d, sizeof(*counters), counters, paddr);
-}
+ ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
-DECLARE_RTL_COND(rtl_reset_counters_cond)
-{
- void __iomem *ioaddr = tp->mmio_addr;
+ RTL_W32(CounterAddrLow, 0);
+ RTL_W32(CounterAddrHigh, 0);
- return RTL_R32(CounterAddrLow) & CounterReset;
+ return ret;
}
static bool rtl8169_reset_counters(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- struct rtl8169_counters *counters;
- dma_addr_t paddr;
- bool ret = true;
/*
* Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the
if (tp->mac_version < RTL_GIGA_MAC_VER_19)
return true;
- counters = rtl8169_map_counters(dev, &paddr, CounterReset);
- if (!counters)
- return false;
-
- if (!rtl_udelay_loop_wait_low(tp, &rtl_reset_counters_cond, 10, 1000))
- ret = false;
-
- rtl8169_unmap_counters(dev, paddr, counters);
-
- return ret;
-}
-
-DECLARE_RTL_COND(rtl_counters_cond)
-{
- void __iomem *ioaddr = tp->mmio_addr;
-
- return RTL_R32(CounterAddrLow) & CounterDump;
+ return rtl8169_do_counters(dev, CounterReset);
}
static bool rtl8169_update_counters(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
- struct rtl8169_counters *counters;
- dma_addr_t paddr;
- bool ret = true;
/*
* Some chips are unable to dump tally counters when the receiver
if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
return true;
- counters = rtl8169_map_counters(dev, &paddr, CounterDump);
- if (!counters)
- return false;
-
- if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
- memcpy(&tp->counters, counters, sizeof(*counters));
- else
- ret = false;
-
- rtl8169_unmap_counters(dev, paddr, counters);
-
- return ret;
+ return rtl8169_do_counters(dev, CounterDump);
}
static bool rtl8169_init_counter_offsets(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ struct rtl8169_counters *counters = tp->counters;
bool ret = false;
/*
if (rtl8169_update_counters(dev))
ret = true;
- tp->tc_offset.tx_errors = tp->counters.tx_errors;
- tp->tc_offset.tx_multi_collision = tp->counters.tx_multi_collision;
- tp->tc_offset.tx_aborted = tp->counters.tx_aborted;
+ tp->tc_offset.tx_errors = counters->tx_errors;
+ tp->tc_offset.tx_multi_collision = counters->tx_multi_collision;
+ tp->tc_offset.tx_aborted = counters->tx_aborted;
tp->tc_offset.inited = true;
return ret;
struct ethtool_stats *stats, u64 *data)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ struct rtl8169_counters *counters = tp->counters;
ASSERT_RTNL();
rtl8169_update_counters(dev);
- data[0] = le64_to_cpu(tp->counters.tx_packets);
- data[1] = le64_to_cpu(tp->counters.rx_packets);
- data[2] = le64_to_cpu(tp->counters.tx_errors);
- data[3] = le32_to_cpu(tp->counters.rx_errors);
- data[4] = le16_to_cpu(tp->counters.rx_missed);
- data[5] = le16_to_cpu(tp->counters.align_errors);
- data[6] = le32_to_cpu(tp->counters.tx_one_collision);
- data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
- data[8] = le64_to_cpu(tp->counters.rx_unicast);
- data[9] = le64_to_cpu(tp->counters.rx_broadcast);
- data[10] = le32_to_cpu(tp->counters.rx_multicast);
- data[11] = le16_to_cpu(tp->counters.tx_aborted);
- data[12] = le16_to_cpu(tp->counters.tx_underun);
+ data[0] = le64_to_cpu(counters->tx_packets);
+ data[1] = le64_to_cpu(counters->rx_packets);
+ data[2] = le64_to_cpu(counters->tx_errors);
+ data[3] = le32_to_cpu(counters->rx_errors);
+ data[4] = le16_to_cpu(counters->rx_missed);
+ data[5] = le16_to_cpu(counters->align_errors);
+ data[6] = le32_to_cpu(counters->tx_one_collision);
+ data[7] = le32_to_cpu(counters->tx_multi_collision);
+ data[8] = le64_to_cpu(counters->rx_unicast);
+ data[9] = le64_to_cpu(counters->rx_broadcast);
+ data[10] = le32_to_cpu(counters->rx_multicast);
+ data[11] = le16_to_cpu(counters->tx_aborted);
+ data[12] = le16_to_cpu(counters->tx_underun);
}
static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
+ struct rtl8169_counters *counters = tp->counters;
unsigned int start;
if (netif_running(dev))
* Subtract values fetched during initalization.
* See rtl8169_init_counter_offsets for a description why we do that.
*/
- stats->tx_errors = le64_to_cpu(tp->counters.tx_errors) -
+ stats->tx_errors = le64_to_cpu(counters->tx_errors) -
le64_to_cpu(tp->tc_offset.tx_errors);
- stats->collisions = le32_to_cpu(tp->counters.tx_multi_collision) -
+ stats->collisions = le32_to_cpu(counters->tx_multi_collision) -
le32_to_cpu(tp->tc_offset.tx_multi_collision);
- stats->tx_aborted_errors = le16_to_cpu(tp->counters.tx_aborted) -
+ stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) -
le16_to_cpu(tp->tc_offset.tx_aborted);
return stats;
unregister_netdev(dev);
+ dma_free_coherent(&tp->pci_dev->dev, sizeof(*tp->counters),
+ tp->counters, tp->counters_phys_addr);
+
rtl_release_firmware(tp);
if (pci_dev_run_wake(pdev))
tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
+	tp->counters = dma_alloc_coherent(&pdev->dev, sizeof(*tp->counters),
+ &tp->counters_phys_addr, GFP_KERNEL);
+ if (!tp->counters) {
+ rc = -ENOMEM;
+ goto err_out_msi_4;
+ }
+
rc = register_netdev(dev);
if (rc < 0)
- goto err_out_msi_4;
+ goto err_out_cnt_5;
pci_set_drvdata(pdev, dev);
out:
return rc;
+err_out_cnt_5:
+ dma_free_coherent(&pdev->dev, sizeof(*tp->counters), tp->counters,
+ tp->counters_phys_addr);
err_out_msi_4:
netif_napi_del(&tp->napi);
rtl_disable_msi(pdev, tp);
interface);
}
- if (IS_ERR(phydev)) {
+ if (IS_ERR_OR_NULL(phydev)) {
pr_err("%s: Could not attach to PHY\n", dev->name);
+ if (!phydev)
+ return -ENODEV;
+
return PTR_ERR(phydev);
}
select PHYLIB
select CRC32
select MII
- depends on OF
+ depends on OF && HAS_DMA
---help---
This driver supports the DWC Ethernet QoS from Synopsys
---help---
Currently supports the DP83867 PHY.
+config MICROCHIP_PHY
+ tristate "Drivers for Microchip PHYs"
+ help
+ Supports the LAN88XX PHYs.
+
config FIXED_PHY
tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
depends on PHYLIB
obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o
+obj-$(CONFIG_MICROCHIP_PHY) += microchip.o
phy_addr = phy_fixed_addr++;
spin_unlock(&phy_fixed_addr_lock);
- ret = fixed_phy_add(PHY_POLL, phy_addr, status, link_gpio);
+ ret = fixed_phy_add(irq, phy_addr, status, link_gpio);
if (ret < 0)
return ERR_PTR(ret);
--- /dev/null
+/*
+ * Copyright (C) 2015 Microchip Technology
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <linux/microchipphy.h>
+
+#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
+#define DRIVER_DESC "Microchip LAN88XX PHY driver"
+
+struct lan88xx_priv {
+ int chip_id;
+ int chip_rev;
+ __u32 wolopts;
+};
+
+static int lan88xx_phy_config_intr(struct phy_device *phydev)
+{
+ int rc;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ /* unmask all source and clear them before enable */
+ rc = phy_write(phydev, LAN88XX_INT_MASK, 0x7FFF);
+ rc = phy_read(phydev, LAN88XX_INT_STS);
+ rc = phy_write(phydev, LAN88XX_INT_MASK,
+ LAN88XX_INT_MASK_MDINTPIN_EN_ |
+ LAN88XX_INT_MASK_LINK_CHANGE_);
+ } else {
+ rc = phy_write(phydev, LAN88XX_INT_MASK, 0);
+ }
+
+ return rc < 0 ? rc : 0;
+}
+
+static int lan88xx_phy_ack_interrupt(struct phy_device *phydev)
+{
+ int rc = phy_read(phydev, LAN88XX_INT_STS);
+
+ return rc < 0 ? rc : 0;
+}
+
+int lan88xx_suspend(struct phy_device *phydev)
+{
+ struct lan88xx_priv *priv = phydev->priv;
+
+ /* do not power down PHY when WOL is enabled */
+ if (!priv->wolopts)
+ genphy_suspend(phydev);
+
+ return 0;
+}
+
+static int lan88xx_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->dev;
+ struct lan88xx_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->wolopts = 0;
+
+	/* these values can be used to identify the internal PHY */
+ priv->chip_id = phy_read_mmd_indirect(phydev, LAN88XX_MMD3_CHIP_ID,
+ 3, phydev->addr);
+ priv->chip_rev = phy_read_mmd_indirect(phydev, LAN88XX_MMD3_CHIP_REV,
+ 3, phydev->addr);
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
+static void lan88xx_remove(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->dev;
+ struct lan88xx_priv *priv = phydev->priv;
+
+ if (priv)
+ devm_kfree(dev, priv);
+}
+
+static int lan88xx_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ struct lan88xx_priv *priv = phydev->priv;
+
+ priv->wolopts = wol->wolopts;
+
+ return 0;
+}
+
+static struct phy_driver microchip_phy_driver[] = {
+{
+ .phy_id = 0x0007c130,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Microchip LAN88xx",
+
+ .features = (PHY_GBIT_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+
+ .probe = lan88xx_probe,
+ .remove = lan88xx_remove,
+
+ .config_init = genphy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+
+ .ack_interrupt = lan88xx_phy_ack_interrupt,
+ .config_intr = lan88xx_phy_config_intr,
+
+ .suspend = lan88xx_suspend,
+ .resume = genphy_resume,
+ .set_wol = lan88xx_set_wol,
+
+ .driver = { .owner = THIS_MODULE, }
+} };
+
+module_phy_driver(microchip_phy_driver);
+
+static struct mdio_device_id __maybe_unused microchip_tbl[] = {
+ { 0x0007c130, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, microchip_tbl);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
{
struct mii_if_info *mii = &dev->mii;
struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
- u16 ladv, radv;
- int ret;
+ int ladv, radv, ret;
u32 buf;
/* clear PHY interrupt status */
}
ladv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
- if (unlikely(ladv < 0))
- return -EIO;
+ if (ladv < 0)
+ return ladv;
radv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
- if (unlikely(radv < 0))
- return -EIO;
+ if (radv < 0)
+ return radv;
netif_dbg(dev, link, dev->net,
"speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
#include <linux/mdio.h>
#include <linux/usb/cdc.h>
-/* Version Information */
-#define DRIVER_VERSION "v1.08.1 (2015/07/28)"
+/* Information for net-next */
+#define NETNEXT_VERSION "08"
+
+/* Information for net */
+#define NET_VERSION "2"
+
+#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
#define MODULENAME "r8152"
#define OCP_EEE_ABLE 0xa5c4
#define OCP_EEE_ADV 0xa5d0
#define OCP_EEE_LPABLE 0xa5d2
+#define OCP_PHY_STATE 0xa708 /* nway state for 8153 */
#define OCP_ADC_CFG 0xbc06
/* SRAM Register */
/* OCP_DOWN_SPEED */
#define EN_10M_BGOFF 0x0080
+/* OCP_PHY_STATE */
+#define TXDIS_STATE 0x01
+#define ABD_STATE 0x02
+
/* OCP_ADC_CFG */
#define CKADSEL_L 0x0100
#define ADC_EN 0x0080
void (*unload)(struct r8152 *);
int (*eee_get)(struct r8152 *, struct ethtool_eee *);
int (*eee_set)(struct r8152 *, struct ethtool_eee *);
+ bool (*in_nway)(struct r8152 *);
} rtl_ops;
int intr_interval;
r8153_enable_aldps(tp);
}
+static bool rtl8152_in_nway(struct r8152 *tp)
+{
+ u16 nway_state;
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, 0x2000);
+ tp->ocp_base = 0x2000;
+ ocp_write_byte(tp, MCU_TYPE_PLA, 0xb014, 0x4c); /* phy state */
+ nway_state = ocp_read_word(tp, MCU_TYPE_PLA, 0xb01a);
+
+ /* bit 15: TXDIS_STATE, bit 14: ABD_STATE */
+ if (nway_state & 0xc000)
+ return false;
+ else
+ return true;
+}
+
+static bool rtl8153_in_nway(struct r8152 *tp)
+{
+ u16 phy_state = ocp_reg_read(tp, OCP_PHY_STATE) & 0xff;
+
+ if (phy_state == TXDIS_STATE || phy_state == ABD_STATE)
+ return false;
+ else
+ return true;
+}
+
static void set_carrier(struct r8152 *tp)
{
struct net_device *netdev = tp->netdev;
return 0;
}
+static bool delay_autosuspend(struct r8152 *tp)
+{
+ bool sw_linking = !!netif_carrier_ok(tp->netdev);
+ bool hw_linking = !!(rtl8152_get_speed(tp) & LINK_STATUS);
+
+	/* This means a link change has occurred which the driver hasn't
+	 * detected yet. If the driver has disabled tx/rx and the hw link is
+	 * up, the device wouldn't be woken up by receiving any packet.
+ */
+ if (work_busy(&tp->schedule.work) || sw_linking != hw_linking)
+ return true;
+
+	/* If the link went down due to nway, the device may miss the link
+	 * change event and wouldn't wake up when the link comes back up.
+ */
+ if (!sw_linking && tp->rtl_ops.in_nway(tp))
+ return true;
+ else
+ return false;
+}
+
static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
{
struct r8152 *tp = usb_get_intfdata(intf);
mutex_lock(&tp->control);
if (PMSG_IS_AUTO(message)) {
- if (netif_running(netdev) && work_busy(&tp->schedule.work)) {
+ if (netif_running(netdev) && delay_autosuspend(tp)) {
ret = -EBUSY;
goto out1;
}
ops->unload = rtl8152_unload;
ops->eee_get = r8152_get_eee;
ops->eee_set = r8152_set_eee;
+ ops->in_nway = rtl8152_in_nway;
break;
case RTL_VER_03:
ops->unload = rtl8153_unload;
ops->eee_get = r8153_get_eee;
ops->eee_set = r8153_set_eee;
+ ops->in_nway = rtl8153_in_nway;
break;
default:
old_state = entry->state;
entry->state = state;
__skb_unlink(skb, list);
- spin_unlock(&list->lock);
- spin_lock(&dev->done.lock);
+
+ /* defer_bh() is never called with list == &dev->done.
+ * spin_lock_nested() tells lockdep that it is OK to take
+ * dev->done.lock here with list->lock held.
+ */
+ spin_lock_nested(&dev->done.lock, SINGLE_DEPTH_NESTING);
+
__skb_queue_tail(&dev->done, skb);
if (dev->done.qlen == 1)
tasklet_schedule(&dev->bh);
- spin_unlock_irqrestore(&dev->done.lock, flags);
+ spin_unlock(&dev->done.lock);
+ spin_unlock_irqrestore(&list->lock, flags);
return old_state;
}
/*-------------------------------------------------------------------------*/
+static void wait_skb_queue_empty(struct sk_buff_head *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->lock, flags);
+ while (!skb_queue_empty(q)) {
+ spin_unlock_irqrestore(&q->lock, flags);
+ schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ spin_lock_irqsave(&q->lock, flags);
+ }
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+
// precondition: never called in_interrupt
static void usbnet_terminate_urbs(struct usbnet *dev)
{
unlink_urbs(dev, &dev->rxq);
/* maybe wait for deletions to finish. */
- while (!skb_queue_empty(&dev->rxq)
- && !skb_queue_empty(&dev->txq)
- && !skb_queue_empty(&dev->done)) {
- schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
- set_current_state(TASK_UNINTERRUPTIBLE);
- netif_dbg(dev, ifdown, dev->net,
- "waited for %d urb completions\n", temp);
- }
+ wait_skb_queue_empty(&dev->rxq);
+ wait_skb_queue_empty(&dev->txq);
+ wait_skb_queue_empty(&dev->done);
+ netif_dbg(dev, ifdown, dev->net,
+ "waited for %d urb completions\n", temp);
set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->wait, &wait);
}
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct metadata_dst *tun_dst = NULL;
- struct ip_tunnel_info *info;
struct vxlan_sock *vs;
struct vxlanhdr *vxh;
u32 flags, vni;
if (!tun_dst)
goto drop;
- info = &tun_dst->u.tun_info;
- md = ip_tunnel_info_opts(info);
+ md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
} else {
memset(md, 0, sizeof(*md));
}
md->gbp = ntohs(gbp->policy_id);
if (tun_dst)
- info->key.tun_flags |= TUNNEL_VXLAN_OPT;
+ tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
if (gbp->dont_learn)
md->gbp |= VXLAN_GBP_DONT_LEARN;
if( !slave_dev || !(slave_dev->flags & IFF_UP) ) {
netdev_err(dev, "trying to enslave non-active device %s\n",
slave_name);
+ if (slave_dev)
+ dev_put(slave_dev);
return -EPERM;
}
struct xenvif_stats stats;
};
-/* Maximum number of Rx slots a to-guest packet may use, including the
- * slot needed for GSO meta-data.
- */
-#define XEN_NETBK_RX_SLOTS_MAX (MAX_SKB_FRAGS + 1)
-
enum state_bit_shift {
/* This bit marks that the vif is connected */
VIF_STATUS_CONNECTED,
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
-/* Determine whether the needed number of slots (req) are available,
- * and set req_event if not.
- */
-bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed);
-
void xenvif_carrier_on(struct xenvif *vif);
/* Callback from stack when TX packet can be released */
return i & (MAX_PENDING_REQS-1);
}
-bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
+static int xenvif_rx_ring_slots_needed(struct xenvif *vif)
+{
+ if (vif->gso_mask)
+ return DIV_ROUND_UP(vif->dev->gso_max_size, PAGE_SIZE) + 1;
+ else
+ return DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
+}
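+
+/*
+ * For example, with 4 KiB pages and the default 64 KiB gso_max_size, a
+ * GSO-capable vif needs DIV_ROUND_UP(65536, 4096) + 1 = 17 slots (the
+ * extra slot carries the GSO meta-data), while a vif with a 1500 byte
+ * MTU and no GSO needs only DIV_ROUND_UP(1500, 4096) = 1 slot.
+ */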
+
+static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
{
RING_IDX prod, cons;
+ int needed;
+
+ needed = xenvif_rx_ring_slots_needed(queue->vif);
do {
prod = queue->rx.sring->req_prod;
} else {
copy_gop->source.domid = DOMID_SELF;
copy_gop->source.u.gmfn =
- virt_to_mfn(page_address(page));
+ virt_to_gfn(page_address(page));
}
copy_gop->source.offset = offset;
skb_queue_head_init(&rxq);
- while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
+ while (xenvif_rx_ring_slots_available(queue)
&& (skb = xenvif_rx_dequeue(queue)) != NULL) {
queue->last_rx_time = jiffies;
queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
- virt_to_mfn(skb->data);
+ virt_to_gfn(skb->data);
queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
queue->tx_copy_ops[*copy_ops].dest.offset =
offset_in_page(skb->data);
prod = queue->rx.sring->req_prod;
cons = queue->rx.req_cons;
- return !queue->stalled
- && prod - cons < XEN_NETBK_RX_SLOTS_MAX
+ return !queue->stalled && prod - cons < 1
&& time_after(jiffies,
queue->last_rx_time + queue->vif->stall_timeout);
}
prod = queue->rx.sring->req_prod;
cons = queue->rx.req_cons;
- return queue->stalled
- && prod - cons >= XEN_NETBK_RX_SLOTS_MAX;
+ return queue->stalled && prod - cons >= 1;
}
static bool xenvif_have_rx_work(struct xenvif_queue *queue)
{
return (!skb_queue_empty(&queue->rx_queue)
- && xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
+ && xenvif_rx_ring_slots_available(queue))
|| (queue->vif->stall_timeout &&
(xenvif_rx_queue_stalled(queue)
|| xenvif_rx_queue_ready(queue)))
if (!xen_domain())
return -ENODEV;
- /* Allow as many queues as there are CPUs, by default */
- xenvif_max_queues = num_online_cpus();
+ /* Allow as many queues as there are CPUs if user has not
+ * specified a value.
+ */
+ if (xenvif_max_queues == 0)
+ xenvif_max_queues = num_online_cpus();
if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
struct sk_buff *skb;
unsigned short id;
grant_ref_t ref;
- unsigned long pfn;
+ unsigned long gfn;
struct xen_netif_rx_request *req;
skb = xennet_alloc_one_rx_buffer(queue);
BUG_ON((signed short)ref < 0);
queue->grant_rx_ref[id] = ref;
- pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
+ gfn = xen_page_to_gfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
req = RING_GET_REQUEST(&queue->rx, req_prod);
gnttab_grant_foreign_access_ref(ref,
queue->info->xbdev->otherend_id,
- pfn_to_mfn(pfn),
+ gfn,
0);
req->id = id;
ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
BUG_ON((signed short)ref < 0);
- gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
- page_to_mfn(page), GNTMAP_readonly);
+ gnttab_grant_foreign_access_ref(ref,
+ queue->info->xbdev->otherend_id,
+ xen_page_to_gfn(page),
+ GNTMAP_readonly);
queue->tx_skbs[id].skb = skb;
queue->grant_tx_page[id] = page;
pr_info("Initialising Xen virtual ethernet driver\n");
- /* Allow as many queues as there are CPUs, by default */
- xennet_max_queues = num_online_cpus();
+ /* Allow as many queues as there are CPUs if user has not
+ * specified a value.
+ */
+ if (xennet_max_queues == 0)
+ xennet_max_queues = num_online_cpus();
return xenbus_register_frontend(&netfront_driver);
}
To compile this driver as a module, choose M here: the module
will be called pwm-lp3943.
+config PWM_LPC18XX_SCT
+ tristate "LPC18xx/43xx PWM/SCT support"
+ depends on ARCH_LPC18XX
+ help
+ Generic PWM framework driver for NXP LPC18xx PWM/SCT which
+ supports 16 channels.
+ A maximum of 15 channels can be requested simultaneously and
+ must have the same period.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-lpc18xx-sct.
+
config PWM_LPC32XX
tristate "LPC32XX PWM support"
depends on ARCH_LPC32XX
obj-$(CONFIG_PWM_IMX) += pwm-imx.o
obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o
obj-$(CONFIG_PWM_LP3943) += pwm-lp3943.o
+obj-$(CONFIG_PWM_LPC18XX_SCT) += pwm-lpc18xx-sct.o
obj-$(CONFIG_PWM_LPC32XX) += pwm-lpc32xx.o
obj-$(CONFIG_PWM_LPSS) += pwm-lpss.o
obj-$(CONFIG_PWM_LPSS_PCI) += pwm-lpss-pci.o
* pwm_set_chip_data() - set private chip data for a PWM
* @pwm: PWM device
* @data: pointer to chip-specific data
+ *
+ * Returns: 0 on success or a negative error code on failure.
*/
int pwm_set_chip_data(struct pwm_device *pwm, void *data)
{
/**
* pwm_get_chip_data() - get private chip data for a PWM
* @pwm: PWM device
+ *
+ * Returns: A pointer to the chip-private data for the PWM device.
*/
void *pwm_get_chip_data(struct pwm_device *pwm)
{
* Register a new PWM chip. If chip->base < 0 then a dynamically assigned base
* will be used. The initial polarity for all channels is specified by the
* @polarity parameter.
+ *
+ * Returns: 0 on success or a negative error code on failure.
*/
int pwmchip_add_with_polarity(struct pwm_chip *chip,
enum pwm_polarity polarity)
*
* Register a new PWM chip. If chip->base < 0 then a dynamically assigned base
* will be used. The initial polarity for all channels is normal.
+ *
+ * Returns: 0 on success or a negative error code on failure.
*/
int pwmchip_add(struct pwm_chip *chip)
{
*
* Removes a PWM chip. This function may return busy if the PWM chip provides
* a PWM device that is still requested.
+ *
+ * Returns: 0 on success or a negative error code on failure.
*/
int pwmchip_remove(struct pwm_chip *chip)
{
/**
* pwm_request() - request a PWM device
- * @pwm_id: global PWM device index
+ * @pwm: global PWM device index
* @label: PWM device label
*
* This function is deprecated, use pwm_get() instead.
+ *
+ * Returns: A pointer to a PWM device or an ERR_PTR()-encoded error code on
+ * failure.
*/
struct pwm_device *pwm_request(int pwm, const char *label)
{
* @index: per-chip index of the PWM to request
* @label: a literal description string of this PWM
*
- * Returns the PWM at the given index of the given PWM chip. A negative error
- * code is returned if the index is not valid for the specified PWM chip or
- * if the PWM device cannot be requested.
+ * Returns: A pointer to the PWM device at the given index of the given PWM
+ * chip. A negative error code is returned if the index is not valid for the
+ * specified PWM chip or if the PWM device cannot be requested.
*/
struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
unsigned int index,
* @pwm: PWM device
* @duty_ns: "on" time (in nanoseconds)
* @period_ns: duration (in nanoseconds) of one cycle
+ *
+ * Returns: 0 on success or a negative error code on failure.
*/
int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
{
* @pwm: PWM device
* @polarity: new polarity of the PWM signal
*
- * Note that the polarity cannot be configured while the PWM device is enabled
+ * Note that the polarity cannot be configured while the PWM device is
+ * enabled.
+ *
+ * Returns: 0 on success or a negative error code on failure.
*/
int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity)
{
if (!pwm->chip->ops->set_polarity)
return -ENOSYS;
- if (test_bit(PWMF_ENABLED, &pwm->flags))
+ if (pwm_is_enabled(pwm))
return -EBUSY;
err = pwm->chip->ops->set_polarity(pwm->chip, pwm, polarity);
/**
* pwm_enable() - start a PWM output toggling
* @pwm: PWM device
+ *
+ * Returns: 0 on success or a negative error code on failure.
*/
int pwm_enable(struct pwm_device *pwm)
{
* lookup of the PWM index. This also means that the "pwm-names" property
* becomes mandatory for devices that look up the PWM device via the con_id
* parameter.
+ *
+ * Returns: A pointer to the requested PWM device or an ERR_PTR()-encoded
+ * error code on failure.
*/
struct pwm_device *of_pwm_get(struct device_node *np, const char *con_id)
{
*
* Once a PWM chip has been found the specified PWM device will be requested
* and is ready to be used.
+ *
+ * Returns: A pointer to the requested PWM device or an ERR_PTR()-encoded
+ * error code on failure.
*/
struct pwm_device *pwm_get(struct device *dev, const char *con_id)
{
*
* This function performs like pwm_get() but the acquired PWM device will
* automatically be released on driver detach.
+ *
+ * Returns: A pointer to the requested PWM device or an ERR_PTR()-encoded
+ * error code on failure.
*/
struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id)
{
*
* This function performs like of_pwm_get() but the acquired PWM device will
* automatically be released on driver detach.
+ *
+ * Returns: A pointer to the requested PWM device or an ERR_PTR()-encoded
+ * error code on failure.
*/
struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np,
const char *con_id)
* pwm_can_sleep() - report whether PWM access will sleep
* @pwm: PWM device
*
- * It returns true if accessing the PWM can sleep, false otherwise.
+ * Returns: True if accessing the PWM can sleep, false otherwise.
*/
bool pwm_can_sleep(struct pwm_device *pwm)
{
if (test_bit(PWMF_REQUESTED, &pwm->flags))
seq_puts(s, " requested");
- if (test_bit(PWMF_ENABLED, &pwm->flags))
+ if (pwm_is_enabled(pwm))
seq_puts(s, " enabled");
seq_puts(s, "\n");
return 0;
}
-
subsys_initcall(pwm_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
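
The kernel-doc additions above pin down the return conventions of the PWM
consumer API. As a minimal consumer sketch tying them together (the function
name and the 1 ms/50% numbers are illustrative only, not taken from any
driver):

	#include <linux/err.h>
	#include <linux/pwm.h>

	static int example_pwm_start(struct device *dev)
	{
		struct pwm_device *pwm;
		int ret;

		/* returns an ERR_PTR()-encoded error code on failure */
		pwm = pwm_get(dev, NULL);
		if (IS_ERR(pwm))
			return PTR_ERR(pwm);

		/* 50% duty cycle at a 1 ms period; 0 on success */
		ret = pwm_config(pwm, 500000, 1000000);
		if (ret < 0)
			goto err_put;

		ret = pwm_enable(pwm);
		if (ret < 0)
			goto err_put;

		return 0;

	err_put:
		pwm_put(pwm);
		return ret;
	}

Drivers that prefer automatic cleanup can use devm_pwm_get() and drop the
pwm_put() error path.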
};
static const struct of_device_id atmel_hlcdc_dt_ids[] = {
+ {
+ .compatible = "atmel,at91sam9n12-hlcdc",
+ /* 9n12 has same errata as 9x5 HLCDC PWM */
+ .data = &atmel_hlcdc_pwm_at91sam9x5_errata,
+ },
{
.compatible = "atmel,at91sam9x5-hlcdc",
.data = &atmel_hlcdc_pwm_at91sam9x5_errata,
tcbpwm->duty = duty;
/* If the PWM is enabled, call enable to apply the new conf */
- if (test_bit(PWMF_ENABLED, &pwm->flags))
+ if (pwm_is_enabled(pwm))
atmel_tcb_pwm_enable(chip, pwm);
return 0;
u32 val;
int ret;
- if (test_bit(PWMF_ENABLED, &pwm->flags) && (period_ns != pwm->period)) {
+ if (pwm_is_enabled(pwm) && (period_ns != pwm_get_period(pwm))) {
dev_err(chip->dev, "cannot change PWM period while enabled\n");
return -EBUSY;
}
* If the PWM channel is enabled, only update CDTY by using the update
* register, it needs to set bit 10 of CMR to 0
*/
- if (test_bit(PWMF_ENABLED, &pwm->flags))
+ if (pwm_is_enabled(pwm))
return;
/*
* If the PWM channel is disabled, write value to duty and period
{
struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
- if (test_bit(PWMF_ENABLED, &pwm->flags)) {
+ if (pwm_is_enabled(pwm)) {
/*
* If the PWM channel is enabled, using the duty update register
* to update the value.
return container_of(_chip, struct kona_pwmc, chip);
}
-static void kona_pwmc_apply_settings(struct kona_pwmc *kp, unsigned int chan)
+/*
+ * Clear trigger bit but set smooth bit to maintain old output.
+ */
+static void kona_pwmc_prepare_for_settings(struct kona_pwmc *kp,
+ unsigned int chan)
{
unsigned int value = readl(kp->base + PWM_CONTROL_OFFSET);
- /* Clear trigger bit but set smooth bit to maintain old output */
value |= 1 << PWM_CONTROL_SMOOTH_SHIFT(chan);
value &= ~(1 << PWM_CONTROL_TRIGGER_SHIFT(chan));
writel(value, kp->base + PWM_CONTROL_OFFSET);
+ /*
+	 * There must be a minimum 400 ns delay between clearing the trigger
+	 * bit and setting it again. Failing to do so may result in no PWM
+	 * signal.
+ */
+ ndelay(400);
+}
+
+static void kona_pwmc_apply_settings(struct kona_pwmc *kp, unsigned int chan)
+{
+ unsigned int value = readl(kp->base + PWM_CONTROL_OFFSET);
+
/* Set trigger bit and clear smooth bit to apply new settings */
value &= ~(1 << PWM_CONTROL_SMOOTH_SHIFT(chan));
value |= 1 << PWM_CONTROL_TRIGGER_SHIFT(chan);
writel(value, kp->base + PWM_CONTROL_OFFSET);
+
+ /* Trigger bit must be held high for at least 400 ns. */
+ ndelay(400);
}
static int kona_pwmc_config(struct pwm_chip *chip, struct pwm_device *pwm,
return -EINVAL;
}
- /* If the PWM channel is enabled, write the settings to the HW */
- if (test_bit(PWMF_ENABLED, &pwm->flags)) {
+ /*
+ * Don't apply settings if disabled. The period and duty cycle are
+ * always calculated above to ensure the new values are
+ * validated immediately instead of on enable.
+ */
+ if (pwm_is_enabled(pwm)) {
+ kona_pwmc_prepare_for_settings(kp, chan);
+
value = readl(kp->base + PRESCALE_OFFSET);
value &= ~PRESCALE_MASK(chan);
value |= prescale << PRESCALE_SHIFT(chan);
return ret;
}
+ kona_pwmc_prepare_for_settings(kp, chan);
+
value = readl(kp->base + PWM_CONTROL_OFFSET);
if (polarity == PWM_POLARITY_NORMAL)
kona_pwmc_apply_settings(kp, chan);
- /* Wait for waveform to settle before gating off the clock */
- ndelay(400);
-
clk_disable_unprepare(kp->clk);
return 0;
return ret;
}
- ret = kona_pwmc_config(chip, pwm, pwm->duty_cycle, pwm->period);
+ ret = kona_pwmc_config(chip, pwm, pwm_get_duty_cycle(pwm),
+ pwm_get_period(pwm));
if (ret < 0) {
clk_disable_unprepare(kp->clk);
return ret;
{
struct kona_pwmc *kp = to_kona_pwmc(chip);
unsigned int chan = pwm->hwpwm;
+ unsigned int value;
+
+ kona_pwmc_prepare_for_settings(kp, chan);
/* Simulate a disable by configuring for zero duty */
writel(0, kp->base + DUTY_CYCLE_HIGH_OFFSET(chan));
- kona_pwmc_apply_settings(kp, chan);
+ writel(0, kp->base + PERIOD_COUNT_OFFSET(chan));
- /* Wait for waveform to settle before gating off the clock */
- ndelay(400);
+ /* Set prescale to 0 for this channel */
+ value = readl(kp->base + PRESCALE_OFFSET);
+ value &= ~PRESCALE_MASK(chan);
+ writel(value, kp->base + PRESCALE_OFFSET);
+
+ kona_pwmc_apply_settings(kp, chan);
clk_disable_unprepare(kp->clk);
}
unsigned int chan;
for (chan = 0; chan < kp->chip.npwm; chan++)
- if (test_bit(PWMF_ENABLED, &kp->chip.pwms[chan].flags))
+ if (pwm_is_enabled(&kp->chip.pwms[chan]))
clk_disable_unprepare(kp->clk);
return pwmchip_remove(&kp->chip);
* The clock needs to be enabled to access the PWM registers.
* Configuration can be changed at any time.
*/
- if (!test_bit(PWMF_ENABLED, &pwm->flags)) {
+ if (!pwm_is_enabled(pwm)) {
ret = clk_enable(ep93xx_pwm->clk);
if (ret)
return ret;
ret = -EINVAL;
}
- if (!test_bit(PWMF_ENABLED, &pwm->flags))
+ if (!pwm_is_enabled(pwm))
clk_disable(ep93xx_pwm->clk);
return ret;
unsigned long long c;
unsigned long period_cycles, duty_cycles, prescale;
unsigned int period_ms;
- bool enable = test_bit(PWMF_ENABLED, &pwm->flags);
+ bool enable = pwm_is_enabled(pwm);
int wait_count = 0, fifoav;
u32 cr, sr;
sr = readl(imx->mmio_base + MX3_PWMSR);
fifoav = sr & MX3_PWMSR_FIFOAV_MASK;
if (fifoav == MX3_PWMSR_FIFOAV_4WORDS) {
- period_ms = DIV_ROUND_UP(pwm->period, NSEC_PER_MSEC);
+ period_ms = DIV_ROUND_UP(pwm_get_period(pwm),
+ NSEC_PER_MSEC);
msleep(period_ms);
sr = readl(imx->mmio_base + MX3_PWMSR);
--- /dev/null
+/*
+ * NXP LPC18xx State Configurable Timer - Pulse Width Modulator driver
+ *
+ * Copyright (c) 2015 Ariel D'Alessandro <ariel@vanguardiasur.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * Notes
+ * =====
+ * NXP LPC18xx provides a State Configurable Timer (SCT) which can be configured
+ * as a Pulse Width Modulator.
+ *
+ * SCT supports 16 outputs, 16 events and 16 registers. Each event will be
+ * triggered when its related register matches the SCT counter value, and it
+ * will set or clear a selected output.
+ *
+ * One of the events is preselected to generate the period, thus the maximum
+ * number of simultaneous channels is limited to 15. Note that the period is
+ * global to all channels, so the driver will refuse to set a different
+ * value unless only one channel is currently requested.
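+ *
+ * For example, if two channels are requested and the period is already
+ * set to 1 ms, a pwm_config() call on either channel that asks for a
+ * different period fails with -EBUSY.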
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+
+/* LPC18xx SCT registers */
+#define LPC18XX_PWM_CONFIG 0x000
+#define LPC18XX_PWM_CONFIG_UNIFY BIT(0)
+#define LPC18XX_PWM_CONFIG_NORELOAD BIT(7)
+
+#define LPC18XX_PWM_CTRL 0x004
+#define LPC18XX_PWM_CTRL_HALT BIT(2)
+#define LPC18XX_PWM_BIDIR BIT(4)
+#define LPC18XX_PWM_PRE_SHIFT 5
+#define LPC18XX_PWM_PRE_MASK (0xff << LPC18XX_PWM_PRE_SHIFT)
+#define LPC18XX_PWM_PRE(x) (x << LPC18XX_PWM_PRE_SHIFT)
+
+#define LPC18XX_PWM_LIMIT 0x008
+
+#define LPC18XX_PWM_RES_BASE 0x058
+#define LPC18XX_PWM_RES_SHIFT(_ch) (_ch * 2)
+#define LPC18XX_PWM_RES(_ch, _action) (_action << LPC18XX_PWM_RES_SHIFT(_ch))
+#define LPC18XX_PWM_RES_MASK(_ch) (0x3 << LPC18XX_PWM_RES_SHIFT(_ch))
+
+#define LPC18XX_PWM_MATCH_BASE 0x100
+#define LPC18XX_PWM_MATCH(_ch) (LPC18XX_PWM_MATCH_BASE + _ch * 4)
+
+#define LPC18XX_PWM_MATCHREL_BASE 0x200
+#define LPC18XX_PWM_MATCHREL(_ch) (LPC18XX_PWM_MATCHREL_BASE + _ch * 4)
+
+#define LPC18XX_PWM_EVSTATEMSK_BASE 0x300
+#define LPC18XX_PWM_EVSTATEMSK(_ch) (LPC18XX_PWM_EVSTATEMSK_BASE + _ch * 8)
+#define LPC18XX_PWM_EVSTATEMSK_ALL 0xffffffff
+
+#define LPC18XX_PWM_EVCTRL_BASE 0x304
+#define LPC18XX_PWM_EVCTRL(_ev) (LPC18XX_PWM_EVCTRL_BASE + _ev * 8)
+
+#define LPC18XX_PWM_EVCTRL_MATCH(_ch) _ch
+
+#define LPC18XX_PWM_EVCTRL_COMB_SHIFT 12
+#define LPC18XX_PWM_EVCTRL_COMB_MATCH (0x1 << LPC18XX_PWM_EVCTRL_COMB_SHIFT)
+
+#define LPC18XX_PWM_OUTPUTSET_BASE 0x500
+#define LPC18XX_PWM_OUTPUTSET(_ch) (LPC18XX_PWM_OUTPUTSET_BASE + _ch * 8)
+
+#define LPC18XX_PWM_OUTPUTCL_BASE 0x504
+#define LPC18XX_PWM_OUTPUTCL(_ch) (LPC18XX_PWM_OUTPUTCL_BASE + _ch * 8)
+
+/* LPC18xx SCT unified counter */
+#define LPC18XX_PWM_TIMER_MAX 0xffffffff
+
+/* LPC18xx SCT events */
+#define LPC18XX_PWM_EVENT_PERIOD 0
+#define LPC18XX_PWM_EVENT_MAX 16
+
+/* SCT conflict resolution */
+enum lpc18xx_pwm_res_action {
+ LPC18XX_PWM_RES_NONE,
+ LPC18XX_PWM_RES_SET,
+ LPC18XX_PWM_RES_CLEAR,
+ LPC18XX_PWM_RES_TOGGLE,
+};
+
+struct lpc18xx_pwm_data {
+ unsigned int duty_event;
+};
+
+struct lpc18xx_pwm_chip {
+ struct device *dev;
+ struct pwm_chip chip;
+ void __iomem *base;
+ struct clk *pwm_clk;
+ unsigned long clk_rate;
+ unsigned int period_ns;
+ unsigned int min_period_ns;
+ unsigned int max_period_ns;
+ unsigned int period_event;
+ unsigned long event_map;
+ struct mutex res_lock;
+ struct mutex period_lock;
+};
+
+static inline struct lpc18xx_pwm_chip *
+to_lpc18xx_pwm_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct lpc18xx_pwm_chip, chip);
+}
+
+static inline void lpc18xx_pwm_writel(struct lpc18xx_pwm_chip *lpc18xx_pwm,
+ u32 reg, u32 val)
+{
+ writel(val, lpc18xx_pwm->base + reg);
+}
+
+static inline u32 lpc18xx_pwm_readl(struct lpc18xx_pwm_chip *lpc18xx_pwm,
+ u32 reg)
+{
+ return readl(lpc18xx_pwm->base + reg);
+}
+
+static void lpc18xx_pwm_set_conflict_res(struct lpc18xx_pwm_chip *lpc18xx_pwm,
+ struct pwm_device *pwm,
+ enum lpc18xx_pwm_res_action action)
+{
+ u32 val;
+
+ mutex_lock(&lpc18xx_pwm->res_lock);
+
+ /*
+	 * Simultaneous set and clear may happen on an output; that is the case
+	 * when duty_ns == period_ns. The LPC18xx SCT allows a conflict
+	 * resolution action to be configured for such a case.
+ */
+ val = lpc18xx_pwm_readl(lpc18xx_pwm, LPC18XX_PWM_RES_BASE);
+ val &= ~LPC18XX_PWM_RES_MASK(pwm->hwpwm);
+ val |= LPC18XX_PWM_RES(pwm->hwpwm, action);
+ lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_RES_BASE, val);
+
+ mutex_unlock(&lpc18xx_pwm->res_lock);
+}
+
+static void lpc18xx_pwm_config_period(struct pwm_chip *chip, int period_ns)
+{
+ struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
+ u64 val;
+
+ val = (u64)period_ns * lpc18xx_pwm->clk_rate;
+ do_div(val, NSEC_PER_SEC);
+
+ lpc18xx_pwm_writel(lpc18xx_pwm,
+ LPC18XX_PWM_MATCH(lpc18xx_pwm->period_event),
+ (u32)val - 1);
+
+ lpc18xx_pwm_writel(lpc18xx_pwm,
+ LPC18XX_PWM_MATCHREL(lpc18xx_pwm->period_event),
+ (u32)val - 1);
+}
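+
+/*
+ * The match value is period_ns converted to SCT clock ticks. For
+ * illustration, with a 180 MHz SCT clock a 1 ms period gives
+ * 1000000 * 180000000 / NSEC_PER_SEC = 180000 ticks, so 179999 is
+ * written to the match and reload registers.
+ */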
+
+static void lpc18xx_pwm_config_duty(struct pwm_chip *chip,
+ struct pwm_device *pwm, int duty_ns)
+{
+ struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
+ struct lpc18xx_pwm_data *lpc18xx_data = pwm_get_chip_data(pwm);
+ u64 val;
+
+ val = (u64)duty_ns * lpc18xx_pwm->clk_rate;
+ do_div(val, NSEC_PER_SEC);
+
+ lpc18xx_pwm_writel(lpc18xx_pwm,
+ LPC18XX_PWM_MATCH(lpc18xx_data->duty_event),
+ (u32)val);
+
+ lpc18xx_pwm_writel(lpc18xx_pwm,
+ LPC18XX_PWM_MATCHREL(lpc18xx_data->duty_event),
+ (u32)val);
+}
+
+static int lpc18xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
+ int requested_events, i;
+
+ if (period_ns < lpc18xx_pwm->min_period_ns ||
+ period_ns > lpc18xx_pwm->max_period_ns) {
+ dev_err(chip->dev, "period %d not in range\n", period_ns);
+ return -ERANGE;
+ }
+
+ mutex_lock(&lpc18xx_pwm->period_lock);
+
+ requested_events = bitmap_weight(&lpc18xx_pwm->event_map,
+ LPC18XX_PWM_EVENT_MAX);
+
+ /*
+ * The PWM supports only a single period for all PWM channels.
+ * Once the period is set, it can only be changed if no more than one
+ * channel is requested at that moment.
+ */
+ if (requested_events > 2 && lpc18xx_pwm->period_ns != period_ns &&
+ lpc18xx_pwm->period_ns) {
+ dev_err(chip->dev, "conflicting period requested for PWM %u\n",
+ pwm->hwpwm);
+ mutex_unlock(&lpc18xx_pwm->period_lock);
+ return -EBUSY;
+ }
+
+ if ((requested_events <= 2 && lpc18xx_pwm->period_ns != period_ns) ||
+ !lpc18xx_pwm->period_ns) {
+ lpc18xx_pwm->period_ns = period_ns;
+ for (i = 0; i < chip->npwm; i++)
+ pwm_set_period(&chip->pwms[i], period_ns);
+ lpc18xx_pwm_config_period(chip, period_ns);
+ }
+
+ mutex_unlock(&lpc18xx_pwm->period_lock);
+
+ lpc18xx_pwm_config_duty(chip, pwm, duty_ns);
+
+ return 0;
+}
+
+static int lpc18xx_pwm_set_polarity(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ enum pwm_polarity polarity)
+{
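+	/*
+	 * Nothing to do here: polarity takes effect when the channel is
+	 * enabled, where lpc18xx_pwm_enable() selects the set/clear events
+	 * according to pwm->polarity.
+	 */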
+ return 0;
+}
+
+static int lpc18xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
+ struct lpc18xx_pwm_data *lpc18xx_data = pwm_get_chip_data(pwm);
+ enum lpc18xx_pwm_res_action res_action;
+ unsigned int set_event, clear_event;
+
+ lpc18xx_pwm_writel(lpc18xx_pwm,
+ LPC18XX_PWM_EVCTRL(lpc18xx_data->duty_event),
+ LPC18XX_PWM_EVCTRL_MATCH(lpc18xx_data->duty_event) |
+ LPC18XX_PWM_EVCTRL_COMB_MATCH);
+
+ lpc18xx_pwm_writel(lpc18xx_pwm,
+ LPC18XX_PWM_EVSTATEMSK(lpc18xx_data->duty_event),
+ LPC18XX_PWM_EVSTATEMSK_ALL);
+
+ if (pwm->polarity == PWM_POLARITY_NORMAL) {
+ set_event = lpc18xx_pwm->period_event;
+ clear_event = lpc18xx_data->duty_event;
+ res_action = LPC18XX_PWM_RES_SET;
+ } else {
+ set_event = lpc18xx_data->duty_event;
+ clear_event = lpc18xx_pwm->period_event;
+ res_action = LPC18XX_PWM_RES_CLEAR;
+ }
+
+ lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_OUTPUTSET(pwm->hwpwm),
+ BIT(set_event));
+ lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_OUTPUTCL(pwm->hwpwm),
+ BIT(clear_event));
+ lpc18xx_pwm_set_conflict_res(lpc18xx_pwm, pwm, res_action);
+
+ return 0;
+}
+
+static void lpc18xx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
+ struct lpc18xx_pwm_data *lpc18xx_data = pwm_get_chip_data(pwm);
+
+ lpc18xx_pwm_writel(lpc18xx_pwm,
+ LPC18XX_PWM_EVCTRL(lpc18xx_data->duty_event), 0);
+ lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_OUTPUTSET(pwm->hwpwm), 0);
+ lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_OUTPUTCL(pwm->hwpwm), 0);
+}
+
+static int lpc18xx_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
+ struct lpc18xx_pwm_data *lpc18xx_data = pwm_get_chip_data(pwm);
+ unsigned long event;
+
+ event = find_first_zero_bit(&lpc18xx_pwm->event_map,
+ LPC18XX_PWM_EVENT_MAX);
+
+ if (event >= LPC18XX_PWM_EVENT_MAX) {
+ dev_err(lpc18xx_pwm->dev,
+ "maximum number of simultaneous channels reached\n");
+ return -EBUSY;
+	}
+
+ set_bit(event, &lpc18xx_pwm->event_map);
+ lpc18xx_data->duty_event = event;
+ lpc18xx_pwm_config_duty(chip, pwm, pwm_get_duty_cycle(pwm));
+
+ return 0;
+}
+
+static void lpc18xx_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
+ struct lpc18xx_pwm_data *lpc18xx_data = pwm_get_chip_data(pwm);
+
+ pwm_disable(pwm);
+ pwm_set_duty_cycle(pwm, 0);
+ clear_bit(lpc18xx_data->duty_event, &lpc18xx_pwm->event_map);
+}
+
+static const struct pwm_ops lpc18xx_pwm_ops = {
+ .config = lpc18xx_pwm_config,
+ .set_polarity = lpc18xx_pwm_set_polarity,
+ .enable = lpc18xx_pwm_enable,
+ .disable = lpc18xx_pwm_disable,
+ .request = lpc18xx_pwm_request,
+ .free = lpc18xx_pwm_free,
+ .owner = THIS_MODULE,
+};
+
+static const struct of_device_id lpc18xx_pwm_of_match[] = {
+ { .compatible = "nxp,lpc1850-sct-pwm" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_pwm_of_match);
+
+static int lpc18xx_pwm_probe(struct platform_device *pdev)
+{
+ struct lpc18xx_pwm_chip *lpc18xx_pwm;
+ struct pwm_device *pwm;
+ struct resource *res;
+ int ret, i;
+ u64 val;
+
+ lpc18xx_pwm = devm_kzalloc(&pdev->dev, sizeof(*lpc18xx_pwm),
+ GFP_KERNEL);
+ if (!lpc18xx_pwm)
+ return -ENOMEM;
+
+ lpc18xx_pwm->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lpc18xx_pwm->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(lpc18xx_pwm->base))
+ return PTR_ERR(lpc18xx_pwm->base);
+
+ lpc18xx_pwm->pwm_clk = devm_clk_get(&pdev->dev, "pwm");
+ if (IS_ERR(lpc18xx_pwm->pwm_clk)) {
+ dev_err(&pdev->dev, "failed to get pwm clock\n");
+ return PTR_ERR(lpc18xx_pwm->pwm_clk);
+ }
+
+ ret = clk_prepare_enable(lpc18xx_pwm->pwm_clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "could not prepare or enable pwm clock\n");
+ return ret;
+ }
+
+ lpc18xx_pwm->clk_rate = clk_get_rate(lpc18xx_pwm->pwm_clk);
+
+ mutex_init(&lpc18xx_pwm->res_lock);
+ mutex_init(&lpc18xx_pwm->period_lock);
+
+ val = (u64)NSEC_PER_SEC * LPC18XX_PWM_TIMER_MAX;
+ do_div(val, lpc18xx_pwm->clk_rate);
+ lpc18xx_pwm->max_period_ns = val;
+
+ lpc18xx_pwm->min_period_ns = DIV_ROUND_UP(NSEC_PER_SEC,
+ lpc18xx_pwm->clk_rate);
+
+ lpc18xx_pwm->chip.dev = &pdev->dev;
+ lpc18xx_pwm->chip.ops = &lpc18xx_pwm_ops;
+ lpc18xx_pwm->chip.base = -1;
+ lpc18xx_pwm->chip.npwm = 16;
+ lpc18xx_pwm->chip.of_xlate = of_pwm_xlate_with_flags;
+ lpc18xx_pwm->chip.of_pwm_n_cells = 3;
+
+ /* SCT counter must be in unify (32 bit) mode */
+ lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CONFIG,
+ LPC18XX_PWM_CONFIG_UNIFY);
+
+ /*
+	 * Every time the timer counter reaches the period value, the related
+	 * event is triggered and the counter is reset to 0.
+ */
+ set_bit(LPC18XX_PWM_EVENT_PERIOD, &lpc18xx_pwm->event_map);
+ lpc18xx_pwm->period_event = LPC18XX_PWM_EVENT_PERIOD;
+
+ lpc18xx_pwm_writel(lpc18xx_pwm,
+ LPC18XX_PWM_EVSTATEMSK(lpc18xx_pwm->period_event),
+ LPC18XX_PWM_EVSTATEMSK_ALL);
+
+ val = LPC18XX_PWM_EVCTRL_MATCH(lpc18xx_pwm->period_event) |
+ LPC18XX_PWM_EVCTRL_COMB_MATCH;
+ lpc18xx_pwm_writel(lpc18xx_pwm,
+ LPC18XX_PWM_EVCTRL(lpc18xx_pwm->period_event), val);
+
+ lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_LIMIT,
+ BIT(lpc18xx_pwm->period_event));
+
+ ret = pwmchip_add(&lpc18xx_pwm->chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret);
+ goto disable_pwmclk;
+ }
+
+ for (i = 0; i < lpc18xx_pwm->chip.npwm; i++) {
+ pwm = &lpc18xx_pwm->chip.pwms[i];
+ pwm->chip_data = devm_kzalloc(lpc18xx_pwm->dev,
+ sizeof(struct lpc18xx_pwm_data),
+ GFP_KERNEL);
+ if (!pwm->chip_data) {
+ ret = -ENOMEM;
+ goto remove_pwmchip;
+ }
+ }
+
+ platform_set_drvdata(pdev, lpc18xx_pwm);
+
+ val = lpc18xx_pwm_readl(lpc18xx_pwm, LPC18XX_PWM_CTRL);
+ val &= ~LPC18XX_PWM_BIDIR;
+ val &= ~LPC18XX_PWM_CTRL_HALT;
+ val &= ~LPC18XX_PWM_PRE_MASK;
+ val |= LPC18XX_PWM_PRE(0);
+ lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CTRL, val);
+
+ return 0;
+
+remove_pwmchip:
+ pwmchip_remove(&lpc18xx_pwm->chip);
+disable_pwmclk:
+ clk_disable_unprepare(lpc18xx_pwm->pwm_clk);
+ return ret;
+}
+
+static int lpc18xx_pwm_remove(struct platform_device *pdev)
+{
+ struct lpc18xx_pwm_chip *lpc18xx_pwm = platform_get_drvdata(pdev);
+ u32 val;
+
+ val = lpc18xx_pwm_readl(lpc18xx_pwm, LPC18XX_PWM_CTRL);
+ lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CTRL,
+ val | LPC18XX_PWM_CTRL_HALT);
+
+ clk_disable_unprepare(lpc18xx_pwm->pwm_clk);
+
+ return pwmchip_remove(&lpc18xx_pwm->chip);
+}
+
+static struct platform_driver lpc18xx_pwm_driver = {
+ .driver = {
+ .name = "lpc18xx-sct-pwm",
+ .of_match_table = lpc18xx_pwm_of_match,
+ },
+ .probe = lpc18xx_pwm_probe,
+ .remove = lpc18xx_pwm_remove,
+};
+module_platform_driver(lpc18xx_pwm_driver);
+
+MODULE_AUTHOR("Ariel D'Alessandro <ariel@vanguardiasur.com.ar>");
+MODULE_DESCRIPTION("NXP LPC18xx PWM driver");
+MODULE_LICENSE("GPL v2");
* If the PWM channel is disabled, make sure to turn on the clock
* before writing the register. Otherwise, keep it enabled.
*/
- if (!test_bit(PWMF_ENABLED, &pwm->flags)) {
+ if (!pwm_is_enabled(pwm)) {
ret = clk_prepare_enable(mxs->clk);
if (ret)
return ret;
/*
* If the PWM is not enabled, turn the clock off again to save power.
*/
- if (!test_bit(PWMF_ENABLED, &pwm->flags))
+ if (!pwm_is_enabled(pwm))
clk_disable_unprepare(mxs->clk);
return 0;
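
The test_bit() replacements in this and the following hunks rely on the
pwm_is_enabled() accessor, which at this point in the series is a thin
inline wrapper along these lines (sketch, not quoted from the patch):

  static inline bool pwm_is_enabled(const struct pwm_device *pwm)
  {
  	return test_bit(PWMF_ENABLED, &pwm->flags);
  }

Hiding the flag behind an accessor lets the PWM core change how the
enabled state is stored without touching every driver again.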
* Driver for PCA9685 16-channel 12-bit PWM LED controller
*
* Copyright (C) 2013 Steffen Trumtrar <s.trumtrar@pengutronix.de>
+ * Copyright (C) 2015 Clemens Gruber <clemens.gruber@pqgruber.com>
*
* based on the pwm-twl-led.c driver
*
#include <linux/pwm.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/delay.h>
+
+/*
+ * Because the PCA9685 has only one prescaler per chip, changing the period of
+ * one channel affects the period of all 16 PWM outputs!
+ * However, the ratio between each configured duty cycle and the chip-wide
+ * period remains constant, because the OFF time is set in proportion to the
+ * counter range.
+ */
#define PCA9685_MODE1 0x00
#define PCA9685_MODE2 0x01
#define PCA9685_ALL_LED_OFF_H 0xFD
#define PCA9685_PRESCALE 0xFE
+#define PCA9685_PRESCALE_MIN 0x03 /* => max. frequency of 1526 Hz */
+#define PCA9685_PRESCALE_MAX 0xFF /* => min. frequency of 24 Hz */
+
+#define PCA9685_COUNTER_RANGE 4096
+#define PCA9685_DEFAULT_PERIOD 5000000 /* Default period: 5 ms => 200 Hz */
+#define PCA9685_OSC_CLOCK_MHZ 25 /* Internal oscillator runs at 25 MHz */
+
#define PCA9685_NUMREGS 0xFF
#define PCA9685_MAXCHAN 0x10
#define LED_FULL (1 << 4)
+#define MODE1_RESTART (1 << 7)
#define MODE1_SLEEP (1 << 4)
#define MODE2_INVRT (1 << 4)
#define MODE2_OUTDRV (1 << 2)
struct pwm_chip chip;
struct regmap *regmap;
int active_cnt;
+ int duty_ns;
+ int period_ns;
};
static inline struct pca9685 *to_pca(struct pwm_chip *chip)
struct pca9685 *pca = to_pca(chip);
unsigned long long duty;
unsigned int reg;
+ int prescale;
+
+ if (period_ns != pca->period_ns) {
+ prescale = DIV_ROUND_CLOSEST(PCA9685_OSC_CLOCK_MHZ * period_ns,
+ PCA9685_COUNTER_RANGE * 1000) - 1;
+
+ if (prescale >= PCA9685_PRESCALE_MIN &&
+ prescale <= PCA9685_PRESCALE_MAX) {
+ /* Put chip into sleep mode */
+ regmap_update_bits(pca->regmap, PCA9685_MODE1,
+ MODE1_SLEEP, MODE1_SLEEP);
+
+ /* Change the chip-wide output frequency */
+ regmap_write(pca->regmap, PCA9685_PRESCALE, prescale);
+
+ /* Wake the chip up */
+ regmap_update_bits(pca->regmap, PCA9685_MODE1,
+ MODE1_SLEEP, 0x0);
+
+ /* Wait 500us for the oscillator to be back up */
+ udelay(500);
+
+ pca->period_ns = period_ns;
+
+ /*
+ * If the duty cycle did not change, restart PWM with
+ * the same duty cycle to period ratio and return.
+ */
+ if (duty_ns == pca->duty_ns) {
+ regmap_update_bits(pca->regmap, PCA9685_MODE1,
+ MODE1_RESTART, MODE1_RESTART);
+ return 0;
+ }
+ } else {
+ dev_err(chip->dev,
+ "prescaler not set: period out of bounds!\n");
+ return -EINVAL;
+ }
+ }
+
+ pca->duty_ns = duty_ns;
if (duty_ns < 1) {
if (pwm->hwpwm >= PCA9685_MAXCHAN)
}
if (duty_ns == period_ns) {
+ /* Clear both OFF registers */
+ if (pwm->hwpwm >= PCA9685_MAXCHAN)
+ reg = PCA9685_ALL_LED_OFF_L;
+ else
+ reg = LED_N_OFF_L(pwm->hwpwm);
+
+ regmap_write(pca->regmap, reg, 0x0);
+
+ if (pwm->hwpwm >= PCA9685_MAXCHAN)
+ reg = PCA9685_ALL_LED_OFF_H;
+ else
+ reg = LED_N_OFF_H(pwm->hwpwm);
+
+ regmap_write(pca->regmap, reg, 0x0);
+
+ /* Set the full ON bit */
if (pwm->hwpwm >= PCA9685_MAXCHAN)
reg = PCA9685_ALL_LED_ON_H;
else
return 0;
}
- duty = 4096 * (unsigned long long)duty_ns;
+ duty = PCA9685_COUNTER_RANGE * (unsigned long long)duty_ns;
duty = DIV_ROUND_UP_ULL(duty, period_ns);
if (pwm->hwpwm >= PCA9685_MAXCHAN)
regmap_write(pca->regmap, reg, ((int)duty >> 8) & 0xf);
+ /* Clear the full ON bit, otherwise the set OFF time has no effect */
+ if (pwm->hwpwm >= PCA9685_MAXCHAN)
+ reg = PCA9685_ALL_LED_ON_H;
+ else
+ reg = LED_N_ON_H(pwm->hwpwm);
+
+ regmap_write(pca->regmap, reg, 0);
+
return 0;
}
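
To make the prescaler arithmetic above concrete, a small self-contained
check (not part of the patch; DIV_ROUND_CLOSEST is simplified here for
non-negative operands):

  #include <stdio.h>

  #define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

  int main(void)
  {
  	long period_ns = 5000000;	/* 5 ms => nominal 200 Hz */
  	long prescale = DIV_ROUND_CLOSEST(25 * period_ns, 4096 * 1000) - 1;

  	/* prescale == 30; actual rate = 25 MHz / (4096 * 31) ~= 196.9 Hz */
  	printf("prescale = %ld\n", prescale);
  	return 0;
  }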
ret);
return ret;
}
+ pca->duty_ns = 0;
+ pca->period_ns = PCA9685_DEFAULT_PERIOD;
i2c_set_clientdata(client, pca);
static struct i2c_driver pca9685_i2c_driver = {
.driver = {
.name = "pca9685-pwm",
- .owner = THIS_MODULE,
.of_match_table = pca9685_dt_ids,
},
.probe = pca9685_pwm_probe,
pwm->duty = duty;
/* If the channel is disabled we're done. */
- if (!test_bit(PWMF_ENABLED, &_pwm->flags))
+ if (!pwm_is_enabled(_pwm))
return 0;
if (duty_only && pwm->timer_on) {
PWM_CONTINUOUS;
u32 val;
- if (pwm->polarity == PWM_POLARITY_INVERSED)
+ if (pwm_get_polarity(pwm) == PWM_POLARITY_INVERSED)
enable_conf |= PWM_DUTY_NEGATIVE | PWM_INACTIVE_POSITIVE;
else
enable_conf |= PWM_DUTY_POSITIVE | PWM_INACTIVE_NEGATIVE;
* If the PWM channel is disabled, make sure to turn on the clock
* before writing the register. Otherwise, keep it enabled.
*/
- if (!test_bit(PWMF_ENABLED, &pwm->flags)) {
+ if (!pwm_is_enabled(pwm)) {
err = clk_prepare_enable(pc->clk);
if (err < 0)
return err;
/*
* If the PWM is not enabled, turn the clock off again to save power.
*/
- if (!test_bit(PWMF_ENABLED, &pwm->flags))
+ if (!pwm_is_enabled(pwm))
clk_disable_unprepare(pc->clk);
return 0;
for (i = 0; i < NUM_PWM; i++) {
struct pwm_device *pwm = &pc->chip.pwms[i];
- if (!test_bit(PWMF_ENABLED, &pwm->flags))
+ if (!pwm_is_enabled(pwm))
if (clk_prepare_enable(pc->clk) < 0)
continue;
writew(reg_val, pc->mmio_base + ECCTL2);
- if (!test_bit(PWMF_ENABLED, &pwm->flags)) {
+ if (!pwm_is_enabled(pwm)) {
/* Update active registers if not running */
writel(duty_cycles, pc->mmio_base + CAP2);
writel(period_cycles, pc->mmio_base + CAP1);
writel(period_cycles, pc->mmio_base + CAP3);
}
- if (!test_bit(PWMF_ENABLED, &pwm->flags)) {
+ if (!pwm_is_enabled(pwm)) {
reg_val = readw(pc->mmio_base + ECCTL2);
/* Disable APWM mode to put APWM output Low */
reg_val &= ~ECCTL2_APWM_MODE;
static void ecap_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
- if (test_bit(PWMF_ENABLED, &pwm->flags)) {
+ if (pwm_is_enabled(pwm)) {
dev_warn(chip->dev, "Removing PWM device without disabling\n");
pm_runtime_put_sync(chip->dev);
}
ecap_pwm_save_context(pc);
/* Disable explicitly if PWM is running */
- if (test_bit(PWMF_ENABLED, &pwm->flags))
+ if (pwm_is_enabled(pwm))
pm_runtime_put_sync(dev);
return 0;
struct pwm_device *pwm = pc->chip.pwms;
/* Enable explicitly if PWM was running */
- if (test_bit(PWMF_ENABLED, &pwm->flags))
+ if (pwm_is_enabled(pwm))
pm_runtime_get_sync(dev);
ecap_pwm_restore_context(pc);
{
struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
- if (test_bit(PWMF_ENABLED, &pwm->flags)) {
+ if (pwm_is_enabled(pwm)) {
dev_warn(chip->dev, "Removing PWM device without disabling\n");
pm_runtime_put_sync(chip->dev);
}
for (i = 0; i < pc->chip.npwm; i++) {
struct pwm_device *pwm = &pc->chip.pwms[i];
- if (!test_bit(PWMF_ENABLED, &pwm->flags))
+ if (!pwm_is_enabled(pwm))
continue;
/* Disable explicitly if PWM is running */
for (i = 0; i < pc->chip.npwm; i++) {
struct pwm_device *pwm = &pc->chip.pwms[i];
- if (!test_bit(PWMF_ENABLED, &pwm->flags))
+ if (!pwm_is_enabled(pwm))
continue;
/* Enable explicitly if PWM was running */
{
const struct pwm_device *pwm = child_to_pwm_device(child);
- return sprintf(buf, "%u\n", pwm->period);
+ return sprintf(buf, "%u\n", pwm_get_period(pwm));
}
static ssize_t pwm_period_store(struct device *child,
if (ret)
return ret;
- ret = pwm_config(pwm, pwm->duty_cycle, val);
+ ret = pwm_config(pwm, pwm_get_duty_cycle(pwm), val);
return ret ? : size;
}
{
const struct pwm_device *pwm = child_to_pwm_device(child);
- return sprintf(buf, "%u\n", pwm->duty_cycle);
+ return sprintf(buf, "%u\n", pwm_get_duty_cycle(pwm));
}
static ssize_t pwm_duty_cycle_store(struct device *child,
if (ret)
return ret;
- ret = pwm_config(pwm, val, pwm->period);
+ ret = pwm_config(pwm, val, pwm_get_period(pwm));
return ret ? : size;
}
char *buf)
{
const struct pwm_device *pwm = child_to_pwm_device(child);
- int enabled = test_bit(PWMF_ENABLED, &pwm->flags);
+ int enabled = pwm_is_enabled(pwm);
return sprintf(buf, "%d\n", enabled);
}
char *buf)
{
const struct pwm_device *pwm = child_to_pwm_device(child);
+ const char *polarity = "unknown";
- return sprintf(buf, "%s\n", pwm->polarity ? "inversed" : "normal");
+ switch (pwm_get_polarity(pwm)) {
+ case PWM_POLARITY_NORMAL:
+ polarity = "normal";
+ break;
+
+ case PWM_POLARITY_INVERSED:
+ polarity = "inversed";
+ break;
+ }
+
+ return sprintf(buf, "%s\n", polarity);
}
static ssize_t pwm_polarity_store(struct device *child,
ATTRIBUTE_GROUPS(pwm_chip);
static struct class pwm_class = {
- .name = "pwm",
- .owner = THIS_MODULE,
- .dev_groups = pwm_chip_groups,
+ .name = "pwm",
+ .owner = THIS_MODULE,
+ .dev_groups = pwm_chip_groups,
};
static int pwmchip_sysfs_match(struct device *parent, const void *data)
if (IS_ERR(ath79_reset->base))
return PTR_ERR(ath79_reset->base);
+ spin_lock_init(&ath79_reset->lock);
ath79_reset->rcdev.ops = &ath79_reset_ops;
ath79_reset->rcdev.owner = THIS_MODULE;
ath79_reset->rcdev.of_node = pdev->dev.of_node;
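
The one-line fix above addresses a common pattern: a spinlock embedded in a
dynamically allocated structure was used without ever being initialized. A
generic sketch of the correct sequence (hypothetical struct, not from the
driver):

  #include <linux/slab.h>
  #include <linux/spinlock.h>

  struct foo {
  	spinlock_t lock;
  };

  static struct foo *foo_alloc(void)
  {
  	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

  	/* kzalloc() zeroes the lock but does not initialize it;
  	 * spin_lock_init() must run before the first spin_lock(). */
  	if (f)
  		spin_lock_init(&f->lock);
  	return f;
  }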
unsigned int data_len = scsi_bufflen(sc);
unsigned int data_grants = 0, seg_grants = 0;
struct scatterlist *sg;
- unsigned long mfn;
struct scsiif_request_segment *seg;
ring_req->nr_segments = 0;
ref = gnttab_claim_grant_reference(&gref_head);
BUG_ON(ref == -ENOSPC);
- mfn = pfn_to_mfn(page_to_pfn(page));
gnttab_grant_foreign_access_ref(ref,
- info->dev->otherend_id, mfn, 1);
+ info->dev->otherend_id,
+ xen_page_to_gfn(page), 1);
shadow->gref[ref_cnt] = ref;
ring_req->seg[ref_cnt].gref = ref;
ring_req->seg[ref_cnt].offset = (uint16_t)off;
ref = gnttab_claim_grant_reference(&gref_head);
BUG_ON(ref == -ENOSPC);
- mfn = pfn_to_mfn(page_to_pfn(page));
gnttab_grant_foreign_access_ref(ref,
- info->dev->otherend_id, mfn, grant_ro);
+ info->dev->otherend_id,
+ xen_page_to_gfn(page),
+ grant_ro);
shadow->gref[ref_cnt] = ref;
seg->gref = ref;
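
This hunk is the first of several Xen hunks in this series that replace
open-coded pfn_to_mfn() with the gfn/bfn helpers. The intent is captured by
the x86 helper pattern (sketch; the real definitions live in the Xen
headers):

  /* gfn is what the hypervisor ABI expects: it equals the mfn on PV
   * guests and the pfn on auto-translated (HVM/PVH/ARM) guests. bfn
   * is the analogous frame number on the DMA/bus side. */
  static inline unsigned long example_pfn_to_gfn(unsigned long pfn)
  {
  	if (xen_feature(XENFEAT_auto_translated_physmap))
  		return pfn;		/* auto-translated: gfn == pfn */
  	return pfn_to_mfn(pfn);		/* PV: gfn == mfn */
  }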
* @smd: handle to qcom_smd
* @of_node: of_node handle for information related to this edge
* @edge_id: identifier of this edge
+ * @remote_pid: identifier of remote processor
* @irq: interrupt for signals on this edge
* @ipc_regmap: regmap handle holding the outgoing ipc register
* @ipc_offset: offset within @ipc_regmap of the register for ipc
struct qcom_smd *smd;
struct device_node *of_node;
unsigned edge_id;
+ unsigned remote_pid;
int irq;
SET_TX_CHANNEL_INFO(channel, fHEAD, 0);
SET_TX_CHANNEL_INFO(channel, fTAIL, 0);
SET_TX_CHANNEL_INFO(channel, fSTATE, 1);
- SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 0);
+ SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 1);
SET_TX_CHANNEL_INFO(channel, head, 0);
SET_TX_CHANNEL_INFO(channel, tail, 0);
* have to scan if the amount of available space in smem has changed
* since last scan.
*/
- available = qcom_smem_get_free_space(edge->edge_id);
+ available = qcom_smem_get_free_space(edge->remote_pid);
if (available != edge->smem_available) {
edge->smem_available = available;
edge->need_rescan = true;
goto out;
}
- SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 1);
+ SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 0);
ret = wait_event_interruptible(channel->fblockread_event,
qcom_smd_get_tx_avail(channel) >= tlen ||
if (ret)
goto out;
- SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 0);
+ SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 1);
}
SET_TX_CHANNEL_INFO(channel, fTAIL, 0);
spin_lock_init(&channel->recv_lock);
init_waitqueue_head(&channel->fblockread_event);
- ret = qcom_smem_get(edge->edge_id, smem_info_item, (void **)&info, &info_size);
+ ret = qcom_smem_get(edge->remote_pid, smem_info_item, (void **)&info,
+ &info_size);
if (ret)
goto free_name_and_channel;
goto free_name_and_channel;
}
- ret = qcom_smem_get(edge->edge_id, smem_fifo_item, &fifo_base, &fifo_size);
+ ret = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_base,
+ &fifo_size);
if (ret)
goto free_name_and_channel;
int i;
for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) {
- ret = qcom_smem_get(edge->edge_id,
+ ret = qcom_smem_get(edge->remote_pid,
smem_items[tbl].alloc_tbl_id,
(void **)&alloc_tbl,
NULL);
return -EINVAL;
}
+ edge->remote_pid = QCOM_SMEM_HOST_ANY;
+ key = "qcom,remote-pid";
+ of_property_read_u32(node, key, &edge->remote_pid);
+
syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
if (!syscon_np) {
dev_err(dev, "no qcom,ipc node\n");
size_t alloc_size;
void *p;
- /* We're not going to find it if there's no matching partition */
- if (host >= SMEM_HOST_COUNT || !smem->partitions[host])
- return -ENOENT;
-
phdr = smem->partitions[host];
p = (void *)phdr + sizeof(*phdr);
if (ret)
return ret;
- ret = qcom_smem_alloc_private(__smem, host, item, size);
- if (ret == -ENOENT)
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host])
+ ret = qcom_smem_alloc_private(__smem, host, item, size);
+ else
ret = qcom_smem_alloc_global(__smem, item, size);
hwspin_unlock_irqrestore(__smem->hwlock, &flags);
struct smem_private_entry *hdr;
void *p;
- /* We're not going to find it if there's no matching partition */
- if (host >= SMEM_HOST_COUNT || !smem->partitions[host])
- return -ENOENT;
-
phdr = smem->partitions[host];
p = (void *)phdr + sizeof(*phdr);
if (ret)
return ret;
- ret = qcom_smem_get_private(__smem, host, item, ptr, size);
- if (ret == -ENOENT)
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host])
+ ret = qcom_smem_get_private(__smem, host, item, ptr, size);
+ else
ret = qcom_smem_get_global(__smem, item, ptr, size);
hwspin_unlock_irqrestore(__smem->hwlock, &flags);
{
int r;
uint64_t v = 0;
- unsigned long mfn;
+ unsigned long gfn;
struct xencons_info *info;
if (!xen_hvm_domain())
}
/*
* If the toolstack (or the hypervisor) hasn't set these values, the
- * default value is 0. Even though mfn = 0 and evtchn = 0 are
+ * default value is 0. Even though gfn = 0 and evtchn = 0 are
* theoretically correct values, in practice they never are and they
* mean that a legacy toolstack hasn't initialized the pv console correctly.
*/
r = hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v);
if (r < 0 || v == 0)
goto err;
- mfn = v;
- info->intf = xen_remap(mfn << PAGE_SHIFT, PAGE_SIZE);
+ gfn = v;
+ info->intf = xen_remap(gfn << PAGE_SHIFT, PAGE_SIZE);
if (info->intf == NULL)
goto err;
info->vtermno = HVC_COOKIE;
return 0;
}
info->evtchn = xen_start_info->console.domU.evtchn;
- info->intf = mfn_to_virt(xen_start_info->console.domU.mfn);
+ /* GFN == MFN for PV guest */
+ info->intf = gfn_to_virt(xen_start_info->console.domU.mfn);
info->vtermno = HVC_COOKIE;
spin_lock(&xencons_lock);
int ret, evtchn, devid, ref, irq;
struct xenbus_transaction xbt;
grant_ref_t gref_head;
- unsigned long mfn;
ret = xenbus_alloc_evtchn(dev, &evtchn);
if (ret)
irq, &domU_hvc_ops, 256);
if (IS_ERR(info->hvc))
return PTR_ERR(info->hvc);
- if (xen_pv_domain())
- mfn = virt_to_mfn(info->intf);
- else
- mfn = __pa(info->intf) >> PAGE_SHIFT;
ret = gnttab_alloc_grant_references(1, &gref_head);
if (ret < 0)
return ret;
if (ref < 0)
return ref;
gnttab_grant_foreign_access_ref(ref, info->xbdev->otherend_id,
- mfn, 0);
+ virt_to_gfn(info->intf), 0);
again:
ret = xenbus_transaction_start(&xbt);
config SERIAL_AMBA_PL011
tristate "ARM AMBA PL011 serial port support"
- depends on ARM_AMBA || SOC_ZX296702
+ depends on ARM_AMBA
select SERIAL_CORE
help
This selects the ARM(R) AMBA(R) PrimeCell PL011 UART. If you have
an Integrator/PP2, Integrator/CP or Versatile platform, say Y or M
- here. Say Y or M if you have SOC_ZX296702.
+ here.
If unsure, say N.
/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
unsigned int ifls;
- unsigned int fr_busy;
- unsigned int fr_dsr;
- unsigned int fr_cts;
- unsigned int fr_ri;
unsigned int lcrh_tx;
unsigned int lcrh_rx;
- u16 *reg_lut;
bool oversampling;
bool dma_threshold;
bool cts_event_workaround;
unsigned int (*get_fifosize)(struct amba_device *dev);
};
-/* Max address offset of register in use is 0x48 */
-#define REG_NR (0x48 >> 2)
-#define IDX(x) (x >> 2)
-enum reg_idx {
- REG_DR = IDX(UART01x_DR),
- REG_RSR = IDX(UART01x_RSR),
- REG_ST_DMAWM = IDX(ST_UART011_DMAWM),
- REG_FR = IDX(UART01x_FR),
- REG_ST_LCRH_RX = IDX(ST_UART011_LCRH_RX),
- REG_ILPR = IDX(UART01x_ILPR),
- REG_IBRD = IDX(UART011_IBRD),
- REG_FBRD = IDX(UART011_FBRD),
- REG_LCRH = IDX(UART011_LCRH),
- REG_CR = IDX(UART011_CR),
- REG_IFLS = IDX(UART011_IFLS),
- REG_IMSC = IDX(UART011_IMSC),
- REG_RIS = IDX(UART011_RIS),
- REG_MIS = IDX(UART011_MIS),
- REG_ICR = IDX(UART011_ICR),
- REG_DMACR = IDX(UART011_DMACR),
-};
-
-static u16 arm_reg[] = {
- [REG_DR] = UART01x_DR,
- [REG_RSR] = UART01x_RSR,
- [REG_ST_DMAWM] = ~0,
- [REG_FR] = UART01x_FR,
- [REG_ST_LCRH_RX] = ~0,
- [REG_ILPR] = UART01x_ILPR,
- [REG_IBRD] = UART011_IBRD,
- [REG_FBRD] = UART011_FBRD,
- [REG_LCRH] = UART011_LCRH,
- [REG_CR] = UART011_CR,
- [REG_IFLS] = UART011_IFLS,
- [REG_IMSC] = UART011_IMSC,
- [REG_RIS] = UART011_RIS,
- [REG_MIS] = UART011_MIS,
- [REG_ICR] = UART011_ICR,
- [REG_DMACR] = UART011_DMACR,
-};
-
-#ifdef CONFIG_ARM_AMBA
static unsigned int get_fifosize_arm(struct amba_device *dev)
{
return amba_rev(dev) < 3 ? 16 : 32;
static struct vendor_data vendor_arm = {
.ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
- .fr_busy = UART01x_FR_BUSY,
- .fr_dsr = UART01x_FR_DSR,
- .fr_cts = UART01x_FR_CTS,
- .fr_ri = UART011_FR_RI,
- .lcrh_tx = REG_LCRH,
- .lcrh_rx = REG_LCRH,
- .reg_lut = arm_reg,
+ .lcrh_tx = UART011_LCRH,
+ .lcrh_rx = UART011_LCRH,
.oversampling = false,
.dma_threshold = false,
.cts_event_workaround = false,
.fixed_options = false,
.get_fifosize = get_fifosize_arm,
};
-#endif
static struct vendor_data vendor_sbsa = {
- .fr_busy = UART01x_FR_BUSY,
- .fr_dsr = UART01x_FR_DSR,
- .fr_cts = UART01x_FR_CTS,
- .fr_ri = UART011_FR_RI,
- .reg_lut = arm_reg,
.oversampling = false,
.dma_threshold = false,
.cts_event_workaround = false,
.fixed_options = true,
};
-#ifdef CONFIG_ARM_AMBA
-static u16 st_reg[] = {
- [REG_DR] = UART01x_DR,
- [REG_RSR] = UART01x_RSR,
- [REG_ST_DMAWM] = ST_UART011_DMAWM,
- [REG_FR] = UART01x_FR,
- [REG_ST_LCRH_RX] = ST_UART011_LCRH_RX,
- [REG_ILPR] = UART01x_ILPR,
- [REG_IBRD] = UART011_IBRD,
- [REG_FBRD] = UART011_FBRD,
- [REG_LCRH] = UART011_LCRH,
- [REG_CR] = UART011_CR,
- [REG_IFLS] = UART011_IFLS,
- [REG_IMSC] = UART011_IMSC,
- [REG_RIS] = UART011_RIS,
- [REG_MIS] = UART011_MIS,
- [REG_ICR] = UART011_ICR,
- [REG_DMACR] = UART011_DMACR,
-};
-
static unsigned int get_fifosize_st(struct amba_device *dev)
{
return 64;
static struct vendor_data vendor_st = {
.ifls = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
- .fr_busy = UART01x_FR_BUSY,
- .fr_dsr = UART01x_FR_DSR,
- .fr_cts = UART01x_FR_CTS,
- .fr_ri = UART011_FR_RI,
- .lcrh_tx = REG_LCRH,
- .lcrh_rx = REG_ST_LCRH_RX,
- .reg_lut = st_reg,
+ .lcrh_tx = ST_UART011_LCRH_TX,
+ .lcrh_rx = ST_UART011_LCRH_RX,
.oversampling = true,
.dma_threshold = true,
.cts_event_workaround = true,
.fixed_options = false,
.get_fifosize = get_fifosize_st,
};
-#endif
-
-#ifdef CONFIG_SOC_ZX296702
-static u16 zte_reg[] = {
- [REG_DR] = ZX_UART01x_DR,
- [REG_RSR] = UART01x_RSR,
- [REG_ST_DMAWM] = ST_UART011_DMAWM,
- [REG_FR] = ZX_UART01x_FR,
- [REG_ST_LCRH_RX] = ST_UART011_LCRH_RX,
- [REG_ILPR] = UART01x_ILPR,
- [REG_IBRD] = UART011_IBRD,
- [REG_FBRD] = UART011_FBRD,
- [REG_LCRH] = ZX_UART011_LCRH_TX,
- [REG_CR] = ZX_UART011_CR,
- [REG_IFLS] = ZX_UART011_IFLS,
- [REG_IMSC] = ZX_UART011_IMSC,
- [REG_RIS] = ZX_UART011_RIS,
- [REG_MIS] = ZX_UART011_MIS,
- [REG_ICR] = ZX_UART011_ICR,
- [REG_DMACR] = ZX_UART011_DMACR,
-};
-
-static struct vendor_data vendor_zte = {
- .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
- .fr_busy = ZX_UART01x_FR_BUSY,
- .fr_dsr = ZX_UART01x_FR_DSR,
- .fr_cts = ZX_UART01x_FR_CTS,
- .fr_ri = ZX_UART011_FR_RI,
- .lcrh_tx = REG_LCRH,
- .lcrh_rx = REG_ST_LCRH_RX,
- .reg_lut = zte_reg,
- .oversampling = false,
- .dma_threshold = false,
- .cts_event_workaround = false,
- .fixed_options = false,
-};
-#endif
/* Deals with DMA transactions */
struct uart_port port;
struct clk *clk;
const struct vendor_data *vendor;
- u16 *reg_lut;
unsigned int dmacr; /* dma control reg */
unsigned int im; /* interrupt mask */
unsigned int old_status;
unsigned int fifosize; /* vendor-specific */
- unsigned int fr_busy; /* vendor-specific */
- unsigned int fr_dsr; /* vendor-specific */
- unsigned int fr_cts; /* vendor-specific */
- unsigned int fr_ri; /* vendor-specific */
unsigned int lcrh_tx; /* vendor-specific */
unsigned int lcrh_rx; /* vendor-specific */
unsigned int old_cr; /* state during shutdown */
#endif
};
-static bool is_implemented(struct uart_amba_port *uap, unsigned int reg)
-{
- return uap->reg_lut[reg] != (u16)~0;
-}
-
-static unsigned int pl011_readw(struct uart_amba_port *uap, int index)
-{
- WARN_ON(index > REG_NR);
- return readw_relaxed(uap->port.membase + uap->reg_lut[index]);
-}
-
-static void pl011_writew(struct uart_amba_port *uap, int val, int index)
-{
- WARN_ON(index > REG_NR);
- writew_relaxed(val, uap->port.membase + uap->reg_lut[index]);
-}
-
-static void pl011_writeb(struct uart_amba_port *uap, u8 val, int index)
-{
- WARN_ON(index > REG_NR);
- writeb_relaxed(val, uap->port.membase + uap->reg_lut[index]);
-}
-
/*
* Reads up to 256 characters from the FIFO or until it's empty and
* inserts them into the TTY layer. Returns the number of characters
int fifotaken = 0;
while (max_count--) {
- status = pl011_readw(uap, REG_FR);
+ status = readw(uap->port.membase + UART01x_FR);
if (status & UART01x_FR_RXFE)
break;
/* Take chars from the FIFO and update status */
- ch = pl011_readw(uap, REG_DR) |
+ ch = readw(uap->port.membase + UART01x_DR) |
UART_DUMMY_DR_RX;
flag = TTY_NORMAL;
uap->port.icount.rx++;
struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
struct device *dev = uap->port.dev;
struct dma_slave_config tx_conf = {
- .dst_addr = uap->port.mapbase + uap->reg_lut[REG_DR],
+ .dst_addr = uap->port.mapbase + UART01x_DR,
.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
.direction = DMA_MEM_TO_DEV,
.dst_maxburst = uap->fifosize >> 1,
if (chan) {
struct dma_slave_config rx_conf = {
- .src_addr = uap->port.mapbase + uap->reg_lut[REG_DR],
+ .src_addr = uap->port.mapbase + UART01x_DR,
.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
.direction = DMA_DEV_TO_MEM,
.src_maxburst = uap->fifosize >> 2,
dmacr = uap->dmacr;
uap->dmacr = dmacr & ~UART011_TXDMAE;
- pl011_writew(uap, uap->dmacr, REG_DMACR);
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
/*
* If TX DMA was disabled, it means that we've stopped the DMA for
dma_dev->device_issue_pending(chan);
uap->dmacr |= UART011_TXDMAE;
- pl011_writew(uap, uap->dmacr, REG_DMACR);
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
uap->dmatx.queued = true;
/*
*/
if (uap->dmatx.queued) {
uap->dmacr |= UART011_TXDMAE;
- pl011_writew(uap, uap->dmacr, REG_DMACR);
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
uap->im &= ~UART011_TXIM;
- pl011_writew(uap, uap->im, REG_IMSC);
+ writew(uap->im, uap->port.membase + UART011_IMSC);
return true;
}
*/
if (pl011_dma_tx_refill(uap) > 0) {
uap->im &= ~UART011_TXIM;
- pl011_writew(uap, uap->im, REG_IMSC);
+ writew(uap->im, uap->port.membase + UART011_IMSC);
return true;
}
return false;
{
if (uap->dmatx.queued) {
uap->dmacr &= ~UART011_TXDMAE;
- pl011_writew(uap, uap->dmacr, REG_DMACR);
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
}
}
if (!uap->dmatx.queued) {
if (pl011_dma_tx_refill(uap) > 0) {
uap->im &= ~UART011_TXIM;
- pl011_writew(uap, uap->im, REG_IMSC);
+ writew(uap->im, uap->port.membase +
+ UART011_IMSC);
} else
ret = false;
} else if (!(uap->dmacr & UART011_TXDMAE)) {
uap->dmacr |= UART011_TXDMAE;
- pl011_writew(uap, uap->dmacr, REG_DMACR);
+ writew(uap->dmacr,
+ uap->port.membase + UART011_DMACR);
}
return ret;
}
*/
dmacr = uap->dmacr;
uap->dmacr &= ~UART011_TXDMAE;
- pl011_writew(uap, uap->dmacr, REG_DMACR);
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
- if (pl011_readw(uap, REG_FR) & UART01x_FR_TXFF) {
+ if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) {
/*
* No space in the FIFO, so enable the transmit interrupt
* so we know when there is space. Note that once we've
return false;
}
- pl011_writew(uap, uap->port.x_char, REG_DR);
+ writew(uap->port.x_char, uap->port.membase + UART01x_DR);
uap->port.icount.tx++;
uap->port.x_char = 0;
/* Success - restore the DMA state */
uap->dmacr = dmacr;
- pl011_writew(uap, dmacr, REG_DMACR);
+ writew(dmacr, uap->port.membase + UART011_DMACR);
return true;
}
DMA_TO_DEVICE);
uap->dmatx.queued = false;
uap->dmacr &= ~UART011_TXDMAE;
- pl011_writew(uap, uap->dmacr, REG_DMACR);
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
}
}
dma_async_issue_pending(rxchan);
uap->dmacr |= UART011_RXDMAE;
- pl011_writew(uap, uap->dmacr, REG_DMACR);
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
uap->dmarx.running = true;
uap->im &= ~UART011_RXIM;
- pl011_writew(uap, uap->im, REG_IMSC);
+ writew(uap->im, uap->port.membase + UART011_IMSC);
return 0;
}
*/
if (dma_count == pending && readfifo) {
/* Clear any error flags */
- pl011_writew(uap,
- UART011_OEIS | UART011_BEIS | UART011_PEIS
- | UART011_FEIS, REG_ICR);
+ writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
+ uap->port.membase + UART011_ICR);
/*
* If we read all the DMA'd characters, and we had an
/* Disable RX DMA - incoming data will wait in the FIFO */
uap->dmacr &= ~UART011_RXDMAE;
- pl011_writew(uap, uap->dmacr, REG_DMACR);
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
uap->dmarx.running = false;
pending = sgbuf->sg.length - state.residue;
dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
"fall back to interrupt mode\n");
uap->im |= UART011_RXIM;
- pl011_writew(uap, uap->im, REG_IMSC);
+ writew(uap->im, uap->port.membase + UART011_IMSC);
}
}
dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
"fall back to interrupt mode\n");
uap->im |= UART011_RXIM;
- pl011_writew(uap, uap->im, REG_IMSC);
+ writew(uap->im, uap->port.membase + UART011_IMSC);
}
}
{
/* FIXME. Just disable the DMA enable */
uap->dmacr &= ~UART011_RXDMAE;
- pl011_writew(uap, uap->dmacr, REG_DMACR);
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
}
/*
spin_lock_irqsave(&uap->port.lock, flags);
pl011_dma_rx_stop(uap);
uap->im |= UART011_RXIM;
- pl011_writew(uap, uap->im, REG_IMSC);
+ writew(uap->im, uap->port.membase + UART011_IMSC);
spin_unlock_irqrestore(&uap->port.lock, flags);
uap->dmarx.running = false;
skip_rx:
/* Turn on DMA error (RX/TX will be enabled on demand) */
uap->dmacr |= UART011_DMAONERR;
- pl011_writew(uap, uap->dmacr, REG_DMACR);
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
/*
* ST Micro variants have some specific DMA burst threshold
* compensation. Set this to 16 bytes, so bursts will only
* be issued above/below 16 bytes.
*/
if (uap->vendor->dma_threshold)
- pl011_writew(uap,
- ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
- REG_ST_DMAWM);
+ writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
+ uap->port.membase + ST_UART011_DMAWM);
if (uap->using_rx_dma) {
if (pl011_dma_rx_trigger_dma(uap))
return;
/* Disable RX and TX DMA */
- while (pl011_readw(uap, REG_FR) & uap->fr_busy)
+ while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
barrier();
spin_lock_irq(&uap->port.lock);
uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
- pl011_writew(uap, uap->dmacr, REG_DMACR);
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
spin_unlock_irq(&uap->port.lock);
if (uap->using_tx_dma) {
container_of(port, struct uart_amba_port, port);
uap->im &= ~UART011_TXIM;
- pl011_writew(uap, uap->im, REG_IMSC);
+ writew(uap->im, uap->port.membase + UART011_IMSC);
pl011_dma_tx_stop(uap);
}
static void pl011_start_tx_pio(struct uart_amba_port *uap)
{
uap->im |= UART011_TXIM;
- pl011_writew(uap, uap->im, REG_IMSC);
+ writew(uap->im, uap->port.membase + UART011_IMSC);
pl011_tx_chars(uap, false);
}
uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
UART011_PEIM|UART011_BEIM|UART011_OEIM);
- pl011_writew(uap, uap->im, REG_IMSC);
+ writew(uap->im, uap->port.membase + UART011_IMSC);
pl011_dma_rx_stop(uap);
}
container_of(port, struct uart_amba_port, port);
uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
- pl011_writew(uap, uap->im, REG_IMSC);
+ writew(uap->im, uap->port.membase + UART011_IMSC);
}
static void pl011_rx_chars(struct uart_amba_port *uap)
dev_dbg(uap->port.dev, "could not trigger RX DMA job "
"fall back to interrupt mode again\n");
uap->im |= UART011_RXIM;
- pl011_writew(uap, uap->im, REG_IMSC);
+ writew(uap->im, uap->port.membase + UART011_IMSC);
} else {
#ifdef CONFIG_DMA_ENGINE
/* Start Rx DMA poll */
bool from_irq)
{
if (unlikely(!from_irq) &&
- pl011_readw(uap, REG_FR) & UART01x_FR_TXFF)
+ readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
return false; /* unable to transmit character */
- pl011_writew(uap, c, REG_DR);
+ writew(c, uap->port.membase + UART01x_DR);
uap->port.icount.tx++;
return true;
{
unsigned int status, delta;
- status = pl011_readw(uap, REG_FR) & UART01x_FR_MODEM_ANY;
+ status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;
delta = status ^ uap->old_status;
uap->old_status = status;
if (delta & UART01x_FR_DCD)
uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);
- if (delta & uap->fr_dsr)
+ if (delta & UART01x_FR_DSR)
uap->port.icount.dsr++;
- if (delta & uap->fr_cts)
- uart_handle_cts_change(&uap->port, status & uap->fr_cts);
+ if (delta & UART01x_FR_CTS)
+ uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS);
wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}
return;
/* workaround to make sure that all bits are unlocked.. */
- pl011_writew(uap, 0x00, REG_ICR);
+ writew(0x00, uap->port.membase + UART011_ICR);
/*
* Workaround: introduce a 26 ns (1 UART clock) delay before W1C;
* a single APB access incurs a 2 pclk (133.12 MHz) delay,
* so add two dummy reads.
*/
- dummy_read = pl011_readw(uap, REG_ICR);
- dummy_read = pl011_readw(uap, REG_ICR);
+ dummy_read = readw(uap->port.membase + UART011_ICR);
+ dummy_read = readw(uap->port.membase + UART011_ICR);
}
static irqreturn_t pl011_int(int irq, void *dev_id)
int handled = 0;
spin_lock_irqsave(&uap->port.lock, flags);
- imsc = pl011_readw(uap, REG_IMSC);
- status = pl011_readw(uap, REG_RIS) & imsc;
+ imsc = readw(uap->port.membase + UART011_IMSC);
+ status = readw(uap->port.membase + UART011_RIS) & imsc;
if (status) {
do {
check_apply_cts_event_workaround(uap);
- pl011_writew(uap, status & ~(UART011_TXIS|UART011_RTIS|
- UART011_RXIS), REG_ICR);
+
+ writew(status & ~(UART011_TXIS|UART011_RTIS|
+ UART011_RXIS),
+ uap->port.membase + UART011_ICR);
if (status & (UART011_RTIS|UART011_RXIS)) {
if (pl011_dma_rx_running(uap))
if (pass_counter-- == 0)
break;
- status = pl011_readw(uap, REG_RIS) & imsc;
+ status = readw(uap->port.membase + UART011_RIS) & imsc;
} while (status != 0);
handled = 1;
}
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
- unsigned int status = pl011_readw(uap, REG_FR);
- return status & (uap->fr_busy|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT;
+ unsigned int status = readw(uap->port.membase + UART01x_FR);
+ return status & (UART01x_FR_BUSY|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT;
}
static unsigned int pl011_get_mctrl(struct uart_port *port)
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
unsigned int result = 0;
- unsigned int status = pl011_readw(uap, REG_FR);
+ unsigned int status = readw(uap->port.membase + UART01x_FR);
#define TIOCMBIT(uartbit, tiocmbit) \
if (status & uartbit) \
result |= tiocmbit
TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
- TIOCMBIT(uap->fr_dsr, TIOCM_DSR);
- TIOCMBIT(uap->fr_cts, TIOCM_CTS);
- TIOCMBIT(uap->fr_ri, TIOCM_RNG);
+ TIOCMBIT(UART01x_FR_DSR, TIOCM_DSR);
+ TIOCMBIT(UART01x_FR_CTS, TIOCM_CTS);
+ TIOCMBIT(UART011_FR_RI, TIOCM_RNG);
#undef TIOCMBIT
return result;
}
container_of(port, struct uart_amba_port, port);
unsigned int cr;
- cr = pl011_readw(uap, REG_CR);
+ cr = readw(uap->port.membase + UART011_CR);
#define TIOCMBIT(tiocmbit, uartbit) \
if (mctrl & tiocmbit) \
}
#undef TIOCMBIT
- pl011_writew(uap, cr, REG_CR);
+ writew(cr, uap->port.membase + UART011_CR);
}
static void pl011_break_ctl(struct uart_port *port, int break_state)
unsigned int lcr_h;
spin_lock_irqsave(&uap->port.lock, flags);
- lcr_h = pl011_readw(uap, uap->lcrh_tx);
+ lcr_h = readw(uap->port.membase + uap->lcrh_tx);
if (break_state == -1)
lcr_h |= UART01x_LCRH_BRK;
else
lcr_h &= ~UART01x_LCRH_BRK;
- pl011_writew(uap, lcr_h, uap->lcrh_tx);
+ writew(lcr_h, uap->port.membase + uap->lcrh_tx);
spin_unlock_irqrestore(&uap->port.lock, flags);
}
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
+ unsigned char __iomem *regs = uap->port.membase;
- pl011_writew(uap, pl011_readw(uap, REG_MIS), REG_ICR);
+ writew(readw(regs + UART011_MIS), regs + UART011_ICR);
/*
* There is no way to clear TXIM as this is "ready to transmit IRQ", so
* we simply mask it. start_tx() will unmask it.
* (including tx queue), so we're also fine with start_tx()'s caller
* side.
*/
- pl011_writew(uap, pl011_readw(uap, REG_IMSC) & ~UART011_TXIM, REG_IMSC);
+ writew(readw(regs + UART011_IMSC) & ~UART011_TXIM, regs + UART011_IMSC);
}
static int pl011_get_poll_char(struct uart_port *port)
*/
pl011_quiesce_irqs(port);
- status = pl011_readw(uap, REG_FR);
+ status = readw(uap->port.membase + UART01x_FR);
if (status & UART01x_FR_RXFE)
return NO_POLL_CHAR;
- return pl011_readw(uap, REG_DR);
+ return readw(uap->port.membase + UART01x_DR);
}
static void pl011_put_poll_char(struct uart_port *port,
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
- while (pl011_readw(uap, REG_FR) & UART01x_FR_TXFF)
+ while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
barrier();
- pl011_writew(uap, ch, REG_DR);
+ writew(ch, uap->port.membase + UART01x_DR);
}
#endif /* CONFIG_CONSOLE_POLL */
uap->port.uartclk = clk_get_rate(uap->clk);
/* Clear pending error and receive interrupts */
- pl011_writew(uap, UART011_OEIS | UART011_BEIS | UART011_PEIS |
- UART011_FEIS | UART011_RTIS | UART011_RXIS, REG_ICR);
+ writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS |
+ UART011_RTIS | UART011_RXIS, uap->port.membase + UART011_ICR);
/*
* Save interrupts enable mask, and enable RX interrupts in case if
* the interrupt is used for NMI entry.
*/
- uap->im = pl011_readw(uap, REG_IMSC);
- pl011_writew(uap, UART011_RTIM | UART011_RXIM, REG_IMSC);
+ uap->im = readw(uap->port.membase + UART011_IMSC);
+ writew(UART011_RTIM | UART011_RXIM, uap->port.membase + UART011_IMSC);
if (dev_get_platdata(uap->port.dev)) {
struct amba_pl011_data *plat;
static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
{
- pl011_writew(uap, lcr_h, uap->lcrh_rx);
- if (is_implemented(uap, REG_ST_LCRH_RX)) {
+ writew(lcr_h, uap->port.membase + uap->lcrh_rx);
+ if (uap->lcrh_rx != uap->lcrh_tx) {
int i;
/*
* Wait 10 PCLKs before writing LCRH_TX register,
* to get this delay, write the read-only register 10 times
*/
for (i = 0; i < 10; ++i)
- pl011_writew(uap, 0xff, REG_MIS);
- pl011_writew(uap, lcr_h, uap->lcrh_tx);
+ writew(0xff, uap->port.membase + UART011_MIS);
+ writew(lcr_h, uap->port.membase + uap->lcrh_tx);
}
}
static int pl011_allocate_irq(struct uart_amba_port *uap)
{
- pl011_writew(uap, uap->im, REG_IMSC);
+ writew(uap->im, uap->port.membase + UART011_IMSC);
return request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
}
spin_lock_irq(&uap->port.lock);
/* Clear out any spuriously appearing RX interrupts */
- pl011_writew(uap, UART011_RTIS | UART011_RXIS, REG_ICR);
+ writew(UART011_RTIS | UART011_RXIS,
+ uap->port.membase + UART011_ICR);
uap->im = UART011_RTIM;
if (!pl011_dma_rx_running(uap))
uap->im |= UART011_RXIM;
- pl011_writew(uap, uap->im, REG_IMSC);
+ writew(uap->im, uap->port.membase + UART011_IMSC);
spin_unlock_irq(&uap->port.lock);
}
if (retval)
goto clk_dis;
- pl011_writew(uap, uap->vendor->ifls, REG_IFLS);
+ writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);
spin_lock_irq(&uap->port.lock);
/* restore RTS and DTR */
cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
- pl011_writew(uap, cr, REG_CR);
+ writew(cr, uap->port.membase + UART011_CR);
spin_unlock_irq(&uap->port.lock);
/*
* initialise the old status of the modem signals
*/
- uap->old_status = pl011_readw(uap, REG_FR) & UART01x_FR_MODEM_ANY;
+ uap->old_status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;
/* Startup DMA */
pl011_dma_startup(uap);
static void pl011_shutdown_channel(struct uart_amba_port *uap,
unsigned int lcrh)
{
- unsigned long val;
+ unsigned long val;
- val = pl011_readw(uap, lcrh);
- val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
- pl011_writew(uap, val, lcrh);
+ val = readw(uap->port.membase + lcrh);
+ val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
+ writew(val, uap->port.membase + lcrh);
}
/*
uap->autorts = false;
spin_lock_irq(&uap->port.lock);
- cr = pl011_readw(uap, REG_CR);
+ cr = readw(uap->port.membase + UART011_CR);
uap->old_cr = cr;
cr &= UART011_CR_RTS | UART011_CR_DTR;
cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
- pl011_writew(uap, cr, REG_CR);
+ writew(cr, uap->port.membase + UART011_CR);
spin_unlock_irq(&uap->port.lock);
/*
* disable break condition and fifos
*/
pl011_shutdown_channel(uap, uap->lcrh_rx);
- if (is_implemented(uap, REG_ST_LCRH_RX))
+ if (uap->lcrh_rx != uap->lcrh_tx)
pl011_shutdown_channel(uap, uap->lcrh_tx);
}
/* mask all interrupts and clear all pending ones */
uap->im = 0;
- pl011_writew(uap, uap->im, REG_IMSC);
- pl011_writew(uap, 0xffff, REG_ICR);
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+ writew(0xffff, uap->port.membase + UART011_ICR);
spin_unlock_irq(&uap->port.lock);
}
pl011_enable_ms(port);
/* first, disable everything */
- old_cr = pl011_readw(uap, REG_CR);
- pl011_writew(uap, 0, REG_CR);
+ old_cr = readw(port->membase + UART011_CR);
+ writew(0, port->membase + UART011_CR);
if (termios->c_cflag & CRTSCTS) {
if (old_cr & UART011_CR_RTS)
quot -= 2;
}
/* Set baud rate */
- pl011_writew(uap, quot & 0x3f, REG_FBRD);
- pl011_writew(uap, quot >> 6, REG_IBRD);
+ writew(quot & 0x3f, port->membase + UART011_FBRD);
+ writew(quot >> 6, port->membase + UART011_IBRD);
/*
* ----------v----------v----------v----------v-----
* NOTE: lcrh_tx and lcrh_rx MUST BE WRITTEN AFTER
- * REG_FBRD & REG_IBRD.
+ * UART011_FBRD & UART011_IBRD.
* ----------^----------^----------^----------^-----
*/
pl011_write_lcr_h(uap, lcr_h);
- pl011_writew(uap, old_cr, REG_CR);
+ writew(old_cr, port->membase + UART011_CR);
spin_unlock_irqrestore(&port->lock, flags);
}
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
- while (pl011_readw(uap, REG_FR) & UART01x_FR_TXFF)
+ while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
barrier();
- pl011_writew(uap, ch, REG_DR);
+ writew(ch, uap->port.membase + UART01x_DR);
}
static void
* First save the CR then disable the interrupts
*/
if (!uap->vendor->always_enabled) {
- old_cr = pl011_readw(uap, REG_CR);
+ old_cr = readw(uap->port.membase + UART011_CR);
new_cr = old_cr & ~UART011_CR_CTSEN;
new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
- pl011_writew(uap, new_cr, REG_CR);
+ writew(new_cr, uap->port.membase + UART011_CR);
}
uart_console_write(&uap->port, s, count, pl011_console_putchar);
* and restore the TCR
*/
do {
- status = pl011_readw(uap, REG_FR);
- } while (status & uap->fr_busy);
+ status = readw(uap->port.membase + UART01x_FR);
+ } while (status & UART01x_FR_BUSY);
if (!uap->vendor->always_enabled)
- pl011_writew(uap, old_cr, REG_CR);
+ writew(old_cr, uap->port.membase + UART011_CR);
if (locked)
spin_unlock(&uap->port.lock);
pl011_console_get_options(struct uart_amba_port *uap, int *baud,
int *parity, int *bits)
{
- if (pl011_readw(uap, REG_CR) & UART01x_CR_UARTEN) {
+ if (readw(uap->port.membase + UART011_CR) & UART01x_CR_UARTEN) {
unsigned int lcr_h, ibrd, fbrd;
- lcr_h = pl011_readw(uap, uap->lcrh_tx);
+ lcr_h = readw(uap->port.membase + uap->lcrh_tx);
*parity = 'n';
if (lcr_h & UART01x_LCRH_PEN) {
else
*bits = 8;
- ibrd = pl011_readw(uap, REG_IBRD);
- fbrd = pl011_readw(uap, REG_FBRD);
+ ibrd = readw(uap->port.membase + UART011_IBRD);
+ fbrd = readw(uap->port.membase + UART011_FBRD);
*baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);
if (uap->vendor->oversampling) {
- if (pl011_readw(uap, REG_CR)
+ if (readw(uap->port.membase + UART011_CR)
& ST_UART011_CR_OVSFACT)
*baud *= 2;
}
static void pl011_putc(struct uart_port *port, int c)
{
- struct uart_amba_port *uap =
- container_of(port, struct uart_amba_port, port);
-
- while (pl011_readw(uap, REG_FR) & UART01x_FR_TXFF)
+ while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
;
- pl011_writeb(uap, c, REG_DR);
- while (pl011_readw(uap, REG_FR) & uap->fr_busy)
+ writeb(c, port->membase + UART01x_DR);
+ while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
;
}
int ret;
/* Ensure interrupts from this UART are masked and cleared */
- pl011_writew(uap, 0, REG_IMSC);
- pl011_writew(uap, 0xffff, REG_ICR);
+ writew(0, uap->port.membase + UART011_IMSC);
+ writew(0xffff, uap->port.membase + UART011_ICR);
if (!amba_reg.state) {
ret = uart_register_driver(&amba_reg);
return ret;
}
-#ifdef CONFIG_ARM_AMBA
static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
struct uart_amba_port *uap;
return PTR_ERR(uap->clk);
uap->vendor = vendor;
- uap->reg_lut = vendor->reg_lut;
uap->lcrh_rx = vendor->lcrh_rx;
uap->lcrh_tx = vendor->lcrh_tx;
- uap->fr_busy = vendor->fr_busy;
- uap->fr_dsr = vendor->fr_dsr;
- uap->fr_cts = vendor->fr_cts;
- uap->fr_ri = vendor->fr_ri;
uap->fifosize = vendor->get_fifosize(dev);
uap->port.irq = dev->irq[0];
uap->port.ops = &amba_pl011_pops;
pl011_unregister_port(uap);
return 0;
}
-#endif
-
-#ifdef CONFIG_SOC_ZX296702
-static int zx_uart_probe(struct platform_device *pdev)
-{
- struct uart_amba_port *uap;
- struct vendor_data *vendor = &vendor_zte;
- struct resource *res;
- int portnr, ret;
-
- portnr = pl011_find_free_port();
- if (portnr < 0)
- return portnr;
-
- uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port),
- GFP_KERNEL);
- if (!uap) {
- ret = -ENOMEM;
- goto out;
- }
-
- uap->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(uap->clk)) {
- ret = PTR_ERR(uap->clk);
- goto out;
- }
-
- uap->vendor = vendor;
- uap->reg_lut = vendor->reg_lut;
- uap->lcrh_rx = vendor->lcrh_rx;
- uap->lcrh_tx = vendor->lcrh_tx;
- uap->fr_busy = vendor->fr_busy;
- uap->fr_dsr = vendor->fr_dsr;
- uap->fr_cts = vendor->fr_cts;
- uap->fr_ri = vendor->fr_ri;
- uap->fifosize = 16;
- uap->port.irq = platform_get_irq(pdev, 0);
- uap->port.ops = &amba_pl011_pops;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- ret = pl011_setup_port(&pdev->dev, uap, res, portnr);
- if (ret)
- return ret;
-
- platform_set_drvdata(pdev, uap);
-
- return pl011_register_port(uap);
-out:
- return ret;
-}
-
-static int zx_uart_remove(struct platform_device *pdev)
-{
- struct uart_amba_port *uap = platform_get_drvdata(pdev);
-
- uart_remove_one_port(&amba_reg, &uap->port);
- pl011_unregister_port(uap);
- return 0;
-}
-#endif
#ifdef CONFIG_PM_SLEEP
static int pl011_suspend(struct device *dev)
return -ENOMEM;
uap->vendor = &vendor_sbsa;
- uap->reg_lut = vendor_sbsa.reg_lut;
- uap->fr_busy = vendor_sbsa.fr_busy;
- uap->fr_dsr = vendor_sbsa.fr_dsr;
- uap->fr_cts = vendor_sbsa.fr_cts;
- uap->fr_ri = vendor_sbsa.fr_ri;
uap->fifosize = 32;
uap->port.irq = platform_get_irq(pdev, 0);
uap->port.ops = &sbsa_uart_pops;
},
};
-#ifdef CONFIG_ARM_AMBA
static struct amba_id pl011_ids[] = {
{
.id = 0x00041011,
.probe = pl011_probe,
.remove = pl011_remove,
};
-#endif
-
-#ifdef CONFIG_SOC_ZX296702
-static const struct of_device_id zx_uart_dt_ids[] = {
- { .compatible = "zte,zx296702-uart", },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, zx_uart_dt_ids);
-
-static struct platform_driver zx_uart_driver = {
- .driver = {
- .name = "zx-uart",
- .owner = THIS_MODULE,
- .pm = &pl011_dev_pm_ops,
- .of_match_table = zx_uart_dt_ids,
- },
- .probe = zx_uart_probe,
- .remove = zx_uart_remove,
-};
-#endif
-
static int __init pl011_init(void)
{
- int ret;
printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");
if (platform_driver_register(&arm_sbsa_uart_platform_driver))
pr_warn("could not register SBSA UART platform driver\n");
-
-#ifdef CONFIG_SOC_ZX296702
- ret = platform_driver_register(&zx_uart_driver);
- if (ret)
- pr_warn("could not register ZX UART platform driver\n");
-#endif
-
-#ifdef CONFIG_ARM_AMBA
- ret = amba_driver_register(&pl011_driver);
-#endif
- return ret;
+ return amba_driver_register(&pl011_driver);
}
static void __exit pl011_exit(void)
{
platform_driver_unregister(&arm_sbsa_uart_platform_driver);
-#ifdef CONFIG_SOC_ZX296702
- platform_driver_unregister(&zx_uart_driver);
-#endif
-#ifdef CONFIG_ARM_AMBA
amba_driver_unregister(&pl011_driver);
-#endif
}
/*
int nr_pages;
int irq;
struct xenfb_page *page;
- unsigned long *mfns;
+ unsigned long *gfns;
int update_wanted; /* XENFB_TYPE_UPDATE wanted */
int feature_resize; /* XENFB_TYPE_RESIZE ok */
struct xenfb_resize resize; /* protected by resize_lock */
info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
- if (!info->mfns)
+ info->gfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
+ if (!info->gfns)
goto error_nomem;
/* set up shared page */
framebuffer_release(info->fb_info);
}
free_page((unsigned long)info->page);
- vfree(info->mfns);
+ vfree(info->gfns);
vfree(info->fb);
kfree(info);
return 0;
}
-static unsigned long vmalloc_to_mfn(void *address)
+static unsigned long vmalloc_to_gfn(void *address)
{
- return pfn_to_mfn(vmalloc_to_pfn(address));
+ return xen_page_to_gfn(vmalloc_to_page(address));
}
static void xenfb_init_shared_page(struct xenfb_info *info,
struct fb_info *fb_info)
{
int i;
- int epd = PAGE_SIZE / sizeof(info->mfns[0]);
+ int epd = PAGE_SIZE / sizeof(info->gfns[0]);
for (i = 0; i < info->nr_pages; i++)
- info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);
+ info->gfns[i] = vmalloc_to_gfn(info->fb + i * PAGE_SIZE);
for (i = 0; i * epd < info->nr_pages; i++)
- info->page->pd[i] = vmalloc_to_mfn(&info->mfns[i * epd]);
+ info->page->pd[i] = vmalloc_to_gfn(&info->gfns[i * epd]);
info->page->width = fb_info->var.xres;
info->page->height = fb_info->var.yres;
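
The shared page holds a two-level directory: pd[] lists the gfns of pages
that in turn hold the framebuffer gfns. A quick capacity check (sketch,
assuming 4 KiB pages on an LP64 build):

  #include <stdio.h>

  int main(void)
  {
  	unsigned long page_size = 4096;
  	unsigned long epd = page_size / sizeof(unsigned long);

  	/* with 8-byte entries, one directory page references 512
  	 * framebuffer pages, i.e. 2 MiB of framebuffer per pd slot */
  	printf("entries per directory page: %lu (%lu KiB covered)\n",
  	       epd, epd * page_size / 1024);
  	return 0;
  }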
goto unbind_irq;
}
ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
- virt_to_mfn(info->page));
+ virt_to_gfn(info->page));
if (ret)
goto error_xenbus;
ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
}
set_page_pfns(vb->pfns + vb->num_pfns, page);
vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
- adjust_managed_page_count(page, -1);
+ if (!virtio_has_feature(vb->vdev,
+ VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
+ adjust_managed_page_count(page, -1);
}
/* Did we get any? */
mutex_unlock(&vb->balloon_lock);
}
-static void release_pages_by_pfn(const u32 pfns[], unsigned int num)
+static void release_pages_balloon(struct virtio_balloon *vb)
{
unsigned int i;
/* Find pfns pointing at start of each page, get pages and free them. */
- for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
- struct page *page = balloon_pfn_to_page(pfns[i]);
- adjust_managed_page_count(page, 1);
+ for (i = 0; i < vb->num_pfns; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
+ struct page *page = balloon_pfn_to_page(vb->pfns[i]);
+ if (!virtio_has_feature(vb->vdev,
+ VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
+ adjust_managed_page_count(page, 1);
put_page(page); /* balloon reference */
}
}
if (vb->num_pfns != 0)
tell_host(vb, vb->deflate_vq);
mutex_unlock(&vb->balloon_lock);
- release_pages_by_pfn(vb->pfns, vb->num_pfns);
+ release_pages_balloon(vb);
return num_freed_pages;
}
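
Both hunks apply the same accounting rule symmetrically. A condensed
restatement (sketch using names from the patch, not code from it):

  /* Only adjust the managed-page counter when DEFLATE_ON_OOM is not
   * negotiated: with the feature, ballooned pages may be reclaimed by
   * the guest under pressure, so they stay accounted as managed. */
  static void balloon_account(struct virtio_balloon *vb,
  			    struct page *page, int delta)
  {
  	if (!virtio_has_feature(vb->vdev,
  				VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
  		adjust_managed_page_count(page, delta);
  }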
#define pr_fmt(fmt) "virtio-mmio: " fmt
+#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
};
MODULE_DEVICE_TABLE(of, virtio_mmio_match);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id virtio_mmio_acpi_match[] = {
+ { "LNRO0005", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, virtio_mmio_acpi_match);
+#endif
+
static struct platform_driver virtio_mmio_driver = {
.probe = virtio_mmio_probe,
.remove = virtio_mmio_remove,
.driver = {
.name = "virtio-mmio",
.of_match_table = virtio_mmio_match,
+ .acpi_match_table = ACPI_PTR(virtio_mmio_acpi_match),
},
};
/* Update direct mapping, invalidate P2M, and add to balloon. */
for (i = 0; i < nr_pages; i++) {
pfn = frame_list[i];
- frame_list[i] = pfn_to_mfn(pfn);
+ frame_list[i] = pfn_to_gfn(pfn);
page = pfn_to_page(pfn);
#ifdef CONFIG_XEN_HAVE_PVMMU
bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
const struct bio_vec *vec2)
{
- unsigned long mfn1 = pfn_to_mfn(page_to_pfn(vec1->bv_page));
- unsigned long mfn2 = pfn_to_mfn(page_to_pfn(vec2->bv_page));
+ unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
+ unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
- ((mfn1 == mfn2) || ((mfn1+1) == mfn2));
+ ((bfn1 == bfn2) || ((bfn1+1) == bfn2));
}
EXPORT_SYMBOL(xen_biovec_phys_mergeable);
struct physdev_pirq_eoi_gmfn eoi_gmfn;
pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
- eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map);
+ eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
/* TODO: No PVH support for PIRQ EOI */
if (rc != 0) {
for (i = 0; i < EVTCHN_FIFO_MAX_QUEUES; i++)
q->head[i] = 0;
- init_control.control_gfn = virt_to_mfn(control_block);
+ init_control.control_gfn = virt_to_gfn(control_block);
init_control.offset = 0;
init_control.vcpu = cpu;
/* Mask all events in this page before adding it. */
init_array_page(array_page);
- expand_array.array_gfn = virt_to_mfn(array_page);
+ expand_array.array_gfn = virt_to_gfn(array_page);
ret = HYPERVISOR_event_channel_op(EVTCHNOP_expand_array, &expand_array);
if (ret < 0)
/* Grant foreign access to the page. */
rc = gnttab_grant_foreign_access(op->domid,
- pfn_to_mfn(page_to_pfn(gref->page)), readonly);
+ xen_page_to_gfn(gref->page),
+ readonly);
if (rc < 0)
goto undo;
gref_ids[i] = gref->gref_id = rc;
* is resuming in a new domain.
*/
si->cancelled = HYPERVISOR_suspend(xen_pv_domain()
- ? virt_to_mfn(xen_start_info)
+ ? virt_to_gfn(xen_start_info)
: 0);
xen_arch_post_suspend(si->cancelled);
return ret;
}
-struct mmap_mfn_state {
+struct mmap_gfn_state {
unsigned long va;
struct vm_area_struct *vma;
domid_t domain;
};
-static int mmap_mfn_range(void *data, void *state)
+static int mmap_gfn_range(void *data, void *state)
{
struct privcmd_mmap_entry *msg = data;
- struct mmap_mfn_state *st = state;
+ struct mmap_gfn_state *st = state;
struct vm_area_struct *vma = st->vma;
int rc;
((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
return -EINVAL;
- rc = xen_remap_domain_mfn_range(vma,
+ rc = xen_remap_domain_gfn_range(vma,
msg->va & PAGE_MASK,
msg->mfn, msg->npages,
vma->vm_page_prot,
struct vm_area_struct *vma;
int rc;
LIST_HEAD(pagelist);
- struct mmap_mfn_state state;
+ struct mmap_gfn_state state;
/* We only support privcmd_ioctl_mmap_batch for auto translated. */
if (xen_feature(XENFEAT_auto_translated_physmap))
rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
&pagelist,
- mmap_mfn_range, &state);
+ mmap_gfn_range, &state);
out_up:
int global_error;
int version;
- /* User-space mfn array to store errors in the second pass for V1. */
- xen_pfn_t __user *user_mfn;
+ /* User-space gfn array to store errors in the second pass for V1. */
+ xen_pfn_t __user *user_gfn;
/* User-space int array to store errors in the second pass for V2. */
int __user *user_err;
};
-/* auto translated dom0 note: if domU being created is PV, then mfn is
- * mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP).
+/* auto translated dom0 note: if domU being created is PV, then gfn is
+ * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
*/
static int mmap_batch_fn(void *data, int nr, void *state)
{
- xen_pfn_t *mfnp = data;
+ xen_pfn_t *gfnp = data;
struct mmap_batch_state *st = state;
struct vm_area_struct *vma = st->vma;
struct page **pages = vma->vm_private_data;
cur_pages = &pages[st->index];
BUG_ON(nr < 0);
- ret = xen_remap_domain_mfn_array(st->vma, st->va & PAGE_MASK, mfnp, nr,
- (int *)mfnp, st->vma->vm_page_prot,
+ ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
+ (int *)gfnp, st->vma->vm_page_prot,
st->domain, cur_pages);
/* Adjust the global_error? */
if (st->version == 1) {
if (err) {
- xen_pfn_t mfn;
+ xen_pfn_t gfn;
- ret = get_user(mfn, st->user_mfn);
+ ret = get_user(gfn, st->user_gfn);
if (ret < 0)
return ret;
/*
* V1 encodes the error codes in the 32bit top
- * nibble of the mfn (with its known
+ * nibble of the gfn (with its known
* limitations vis-a-vis 64 bit callers).
*/
- mfn |= (err == -ENOENT) ?
+ gfn |= (err == -ENOENT) ?
PRIVCMD_MMAPBATCH_PAGED_ERROR :
PRIVCMD_MMAPBATCH_MFN_ERROR;
- return __put_user(mfn, st->user_mfn++);
+ return __put_user(gfn, st->user_gfn++);
} else
- st->user_mfn++;
+ st->user_gfn++;
} else { /* st->version == 2 */
if (err)
return __put_user(err, st->user_err++);
return 0;
}
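
For reference, the V1 in-band error encoding handled above, condensed into
one helper (sketch; the PRIVCMD_MMAPBATCH_* values are quoted from
include/uapi/xen/privcmd.h as an assumption):

  #include <errno.h>
  #include <stdint.h>

  typedef uint64_t xen_pfn_t;

  /* Values assumed from include/uapi/xen/privcmd.h */
  #define PRIVCMD_MMAPBATCH_MFN_ERROR	0xf0000000U
  #define PRIVCMD_MMAPBATCH_PAGED_ERROR	0x80000000U

  /* V1 folds the status into the top nibble of the 32-bit frame slot,
   * which is why it cannot faithfully report 64-bit frame numbers. */
  static xen_pfn_t encode_v1_error(xen_pfn_t gfn, int err)
  {
  	return gfn | (err == -ENOENT ? PRIVCMD_MMAPBATCH_PAGED_ERROR
  				     : PRIVCMD_MMAPBATCH_MFN_ERROR);
  }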
-/* Allocate pfns that are then mapped with gmfns from foreign domid. Update
+/* Allocate pfns that are then mapped with gfns from foreign domid. Update
* the vma with the page info to use later.
* Returns: 0 if success, otherwise -errno
*/
if (state.global_error) {
/* Write back errors in second pass. */
- state.user_mfn = (xen_pfn_t *)m.arr;
+ state.user_gfn = (xen_pfn_t *)m.arr;
state.user_err = m.err;
ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
&pagelist, mmap_return_errors, &state);
if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
return;
- rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
+ rc = xen_unmap_domain_gfn_range(vma, numpgs, pages);
if (rc == 0)
free_xenballooned_pages(numpgs, pages);
else
*/
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
- unsigned long mfn = pfn_to_mfn(PFN_DOWN(paddr));
- dma_addr_t dma = (dma_addr_t)mfn << PAGE_SHIFT;
+ unsigned long bfn = pfn_to_bfn(PFN_DOWN(paddr));
+ dma_addr_t dma = (dma_addr_t)bfn << PAGE_SHIFT;
dma |= paddr & ~PAGE_MASK;
static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
- unsigned long pfn = mfn_to_pfn(PFN_DOWN(baddr));
+ unsigned long pfn = bfn_to_pfn(PFN_DOWN(baddr));
dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT;
phys_addr_t paddr = dma;
unsigned int offset,
size_t length)
{
- unsigned long next_mfn;
+ unsigned long next_bfn;
int i;
int nr_pages;
- next_mfn = pfn_to_mfn(pfn);
+ next_bfn = pfn_to_bfn(pfn);
nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;
for (i = 1; i < nr_pages; i++) {
- if (pfn_to_mfn(++pfn) != ++next_mfn)
+ if (pfn_to_bfn(++pfn) != ++next_bfn)
return 0;
}
return 1;
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
- unsigned long mfn = PFN_DOWN(dma_addr);
- unsigned long pfn = mfn_to_local_pfn(mfn);
+ unsigned long bfn = PFN_DOWN(dma_addr);
+ unsigned long pfn = bfn_to_local_pfn(bfn);
phys_addr_t paddr;
/* If the address is outside our domain, it CAN
/* xen generic tmem ops */
static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
- u32 index, unsigned long pfn)
+ u32 index, struct page *page)
{
- unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
-
return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
- gmfn, 0, 0, 0);
+ xen_page_to_gfn(page), 0, 0, 0);
}
static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
- u32 index, unsigned long pfn)
+ u32 index, struct page *page)
{
- unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
-
return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
- gmfn, 0, 0, 0);
+ xen_page_to_gfn(page), 0, 0, 0);
}
static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
{
u32 ind = (u32) index;
struct tmem_oid oid = *(struct tmem_oid *)&key;
- unsigned long pfn = page_to_pfn(page);
if (pool < 0)
return;
if (ind != index)
return;
mb(); /* ensure page is quiescent; tmem may address it with an alias */
- (void)xen_tmem_put_page((u32)pool, oid, ind, pfn);
+ (void)xen_tmem_put_page((u32)pool, oid, ind, page);
}
static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
{
u32 ind = (u32) index;
struct tmem_oid oid = *(struct tmem_oid *)&key;
- unsigned long pfn = page_to_pfn(page);
int ret;
/* translate return values to linux semantics */
return -1;
if (ind != index)
return -1;
- ret = xen_tmem_get_page((u32)pool, oid, ind, pfn);
+ ret = xen_tmem_get_page((u32)pool, oid, ind, page);
if (ret == 1)
return 0;
else
{
u64 ind64 = (u64)offset;
u32 ind = (u32)offset;
- unsigned long pfn = page_to_pfn(page);
int pool = tmem_frontswap_poolid;
int ret;
if (ind64 != ind)
return -1;
mb(); /* ensure page is quiescent; tmem may address it with an alias */
- ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), pfn);
+ ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), page);
/* translate Xen tmem return values to linux semantics */
if (ret == 1)
return 0;
{
u64 ind64 = (u64)offset;
u32 ind = (u32)offset;
- unsigned long pfn = page_to_pfn(page);
int pool = tmem_frontswap_poolid;
int ret;
return -1;
if (ind64 != ind)
return -1;
- ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), pfn);
+ ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), page);
/* translate Xen tmem return values to linux semantics */
if (ret == 1)
return 0;
for (i = 0; i < nr_pages; i++) {
err = gnttab_grant_foreign_access(dev->otherend_id,
- virt_to_mfn(vaddr), 0);
+ virt_to_gfn(vaddr), 0);
if (err < 0) {
xenbus_dev_fatal(dev, err,
"granting access to ring page");
goto out_err;
gnttab_grant_foreign_access_ref(GNTTAB_RESERVED_XENSTORE, domid,
- virt_to_mfn(xen_store_interface), 0 /* writable */);
+ virt_to_gfn(xen_store_interface), 0 /* writable */);
arg.dom = DOMID_SELF;
arg.remote_dom = domid;
enum xenstore_init xen_store_domain_type;
EXPORT_SYMBOL_GPL(xen_store_domain_type);
-static unsigned long xen_store_mfn;
+static unsigned long xen_store_gfn;
static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
if (!page)
goto out_err;
- xen_store_mfn = xen_start_info->store_mfn =
- pfn_to_mfn(virt_to_phys((void *)page) >>
- PAGE_SHIFT);
+ xen_store_gfn = xen_start_info->store_mfn = virt_to_gfn((void *)page);
/* Next allocate a local port which xenstored can bind to */
alloc_unbound.dom = DOMID_SELF;
err = xenstored_local_init();
if (err)
goto out_error;
- xen_store_interface = mfn_to_virt(xen_store_mfn);
+ xen_store_interface = gfn_to_virt(xen_store_gfn);
break;
case XS_PV:
xen_store_evtchn = xen_start_info->store_evtchn;
- xen_store_mfn = xen_start_info->store_mfn;
- xen_store_interface = mfn_to_virt(xen_store_mfn);
+ xen_store_gfn = xen_start_info->store_mfn;
+ xen_store_interface = gfn_to_virt(xen_store_gfn);
break;
case XS_HVM:
err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
if (err)
goto out_error;
- xen_store_mfn = (unsigned long)v;
+ xen_store_gfn = (unsigned long)v;
xen_store_interface =
- xen_remap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
+ xen_remap(xen_store_gfn << PAGE_SHIFT, PAGE_SIZE);
break;
default:
pr_warn("Xenstore state unknown\n");
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
-/* map fgmfn of domid to lpfn in the current domain */
-static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
+/* map fgfn of domid to lpfn in the current domain */
+static int map_foreign_page(unsigned long lpfn, unsigned long fgfn,
unsigned int domid)
{
int rc;
.size = 1,
.space = XENMAPSPACE_gmfn_foreign,
};
- xen_ulong_t idx = fgmfn;
+ xen_ulong_t idx = fgfn;
xen_pfn_t gpfn = lpfn;
int err = 0;
}
struct remap_data {
- xen_pfn_t *fgmfn; /* foreign domain's gmfn */
+ xen_pfn_t *fgfn; /* foreign domain's gfn */
pgprot_t prot;
domid_t domid;
struct vm_area_struct *vma;
int index;
struct page **pages;
- struct xen_remap_mfn_info *info;
+ struct xen_remap_gfn_info *info;
int *err_ptr;
int mapped;
};
pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
int rc;
- rc = map_foreign_page(pfn, *info->fgmfn, info->domid);
+ rc = map_foreign_page(pfn, *info->fgfn, info->domid);
*info->err_ptr++ = rc;
if (!rc) {
set_pte_at(info->vma->vm_mm, addr, ptep, pte);
info->mapped++;
}
- info->fgmfn++;
+ info->fgfn++;
return 0;
}
int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
unsigned long addr,
- xen_pfn_t *mfn, int nr,
+ xen_pfn_t *gfn, int nr,
int *err_ptr, pgprot_t prot,
unsigned domid,
struct page **pages)
x86 PVOPS */
BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
- data.fgmfn = mfn;
+ data.fgfn = gfn;
data.prot = prot;
data.domid = domid;
data.vma = vma;
--- /dev/null
+/*
+ * fs/cifs/cifs_ioctl.h
+ *
+ * Structure definitions for io control for cifs/smb3
+ *
+ * Copyright (c) 2015 Steve French <steve.french@primarydata.com>
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ */
+
+struct smb_mnt_fs_info {
+ __u32 version; /* 0001 */
+ __u16 protocol_id;
+ __u16 tcon_flags;
+ __u32 vol_serial_number;
+ __u32 vol_create_time;
+ __u32 share_caps;
+ __u32 share_flags;
+ __u32 sector_flags;
+ __u32 optimal_sector_size;
+ __u32 max_bytes_chunk;
+ __u32 fs_attributes;
+ __u32 max_path_component;
+ __u32 device_type;
+ __u32 device_characteristics;
+ __u32 maximal_access;
+ __u64 cifs_posix_caps;
+} __packed;
+
+#define CIFS_IOCTL_MAGIC 0xCF
+#define CIFS_IOC_COPYCHUNK_FILE _IOW(CIFS_IOCTL_MAGIC, 3, int)
+#define CIFS_IOC_SET_INTEGRITY _IO(CIFS_IOCTL_MAGIC, 4)
+#define CIFS_IOC_GET_MNT_INFO _IOR(CIFS_IOCTL_MAGIC, 5, struct smb_mnt_fs_info)
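The new CIFS_IOC_GET_MNT_INFO ioctl can be exercised from user space on any
open file on a cifs/smb3 mount. A minimal sketch, assuming the
struct smb_mnt_fs_info and ioctl definitions above are copied into scope (as
user space must do) and that /mnt/share is a hypothetical mounted share:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/types.h>	/* __u16/__u32/__u64 for the copied struct */

	int main(void)
	{
		struct smb_mnt_fs_info info;
		int fd = open("/mnt/share/file", O_RDONLY);

		if (fd < 0)
			return 1;
		if (ioctl(fd, CIFS_IOC_GET_MNT_INFO, &info) == 0)
			printf("version %u protocol 0x%x max path component %u\n",
			       info.version, info.protocol_id,
			       info.max_path_component);
		close(fd);
		return 0;
	}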
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "2.06"
+#define CIFS_VERSION "2.07"
#endif /* _CIFSFS_H */
#define FILE_DEVICE_VIRTUAL_DISK 0x00000024
#define FILE_DEVICE_NETWORK_REDIRECTOR 0x00000028
+/* Device Characteristics */
+#define FILE_REMOVABLE_MEDIA 0x00000001
+#define FILE_READ_ONLY_DEVICE 0x00000002
+#define FILE_FLOPPY_DISKETTE 0x00000004
+#define FILE_WRITE_ONCE_MEDIA 0x00000008
+#define FILE_REMOTE_DEVICE 0x00000010
+#define FILE_DEVICE_IS_MOUNTED 0x00000020
+#define FILE_VIRTUAL_VOLUME 0x00000040
+#define FILE_DEVICE_SECURE_OPEN 0x00000100
+#define FILE_CHARACTERISTIC_TS_DEVICE 0x00001000
+#define FILE_CHARACTERISTIC_WEBDAV_DEVICE 0x00002000
+#define FILE_PORTABLE_DEVICE 0x00004000
+#define FILE_DEVICE_ALLOW_APPCONTAINER_TRAVERSAL 0x00020000
+
typedef struct {
__le32 DeviceType;
__le32 DeviceCharacteristics;
{
struct TCP_Server_Info *server = mid->callback_data;
+ mutex_lock(&server->srv_mutex);
DeleteMidQEntry(mid);
+ mutex_unlock(&server->srv_mutex);
add_credits(server, 1, CIFS_ECHO_OP);
}
}
queue_work(cifsiod_wq, &rdata->work);
+ mutex_lock(&server->srv_mutex);
DeleteMidQEntry(mid);
+ mutex_unlock(&server->srv_mutex);
add_credits(server, 1, 0);
}
{
struct cifs_writedata *wdata = mid->callback_data;
struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
+ struct TCP_Server_Info *server = tcon->ses->server;
unsigned int written;
WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf;
}
queue_work(cifsiod_wq, &wdata->work);
+ mutex_lock(&server->srv_mutex);
DeleteMidQEntry(mid);
+ mutex_unlock(&server->srv_mutex);
add_credits(tcon->ses->server, 1, 0);
}
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifsfs.h"
+#include "cifs_ioctl.h"
#include <linux/btrfs.h>
-#define CIFS_IOCTL_MAGIC 0xCF
-#define CIFS_IOC_COPYCHUNK_FILE _IOW(CIFS_IOCTL_MAGIC, 3, int)
-#define CIFS_IOC_SET_INTEGRITY _IO(CIFS_IOCTL_MAGIC, 4)
-
static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
unsigned long srcfd, u64 off, u64 len, u64 destoff,
bool dup_extents)
return rc;
}
+static long smb_mnt_get_fsinfo(unsigned int xid, struct cifs_tcon *tcon,
+ void __user *arg)
+{
+ int rc = 0;
+ struct smb_mnt_fs_info *fsinf;
+
+ fsinf = kzalloc(sizeof(struct smb_mnt_fs_info), GFP_KERNEL);
+ if (fsinf == NULL)
+ return -ENOMEM;
+
+ fsinf->version = 1;
+ fsinf->protocol_id = tcon->ses->server->vals->protocol_id;
+ fsinf->device_characteristics =
+ le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics);
+ fsinf->device_type = le32_to_cpu(tcon->fsDevInfo.DeviceType);
+ fsinf->fs_attributes = le32_to_cpu(tcon->fsAttrInfo.Attributes);
+ fsinf->max_path_component =
+ le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
+#ifdef CONFIG_CIFS_SMB2
+ fsinf->vol_serial_number = tcon->vol_serial_number;
+ fsinf->vol_create_time = le64_to_cpu(tcon->vol_create_time);
+ fsinf->share_flags = tcon->share_flags;
+ fsinf->share_caps = le32_to_cpu(tcon->capabilities);
+ fsinf->sector_flags = tcon->ss_flags;
+ fsinf->optimal_sector_size = tcon->perf_sector_size;
+ fsinf->max_bytes_chunk = tcon->max_bytes_chunk;
+ fsinf->maximal_access = tcon->maximal_access;
+#endif /* SMB2 */
+ fsinf->cifs_posix_caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
+
+ if (copy_to_user(arg, fsinf, sizeof(struct smb_mnt_fs_info)))
+ rc = -EFAULT;
+
+ kfree(fsinf);
+ return rc;
+}
+
long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
{
struct inode *inode = file_inode(filep);
xid = get_xid();
- cifs_dbg(FYI, "ioctl file %p cmd %u arg %lu\n", filep, command, arg);
-
cifs_sb = CIFS_SB(inode->i_sb);
switch (command) {
else
rc = -EOPNOTSUPP;
break;
+ case CIFS_IOC_GET_MNT_INFO:
+ tcon = tlink_tcon(pSMBFile->tlink);
+ rc = smb_mnt_get_fsinfo(xid, tcon, (void __user *)arg);
+ break;
default:
cifs_dbg(FYI, "unsupported ioctl\n");
break;
if (mid->mid_state == MID_RESPONSE_RECEIVED)
credits_received = le16_to_cpu(smb2->hdr.CreditRequest);
+ mutex_lock(&server->srv_mutex);
DeleteMidQEntry(mid);
+ mutex_unlock(&server->srv_mutex);
add_credits(server, credits_received, CIFS_ECHO_OP);
}
cifs_stats_fail_inc(tcon, SMB2_READ_HE);
queue_work(cifsiod_wq, &rdata->work);
+ mutex_lock(&server->srv_mutex);
DeleteMidQEntry(mid);
+ mutex_unlock(&server->srv_mutex);
add_credits(server, credits_received, 0);
}
{
struct cifs_writedata *wdata = mid->callback_data;
struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
+ struct TCP_Server_Info *server = tcon->ses->server;
unsigned int written;
struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
unsigned int credits_received = 1;
cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
queue_work(cifsiod_wq, &wdata->work);
+ mutex_lock(&server->srv_mutex);
DeleteMidQEntry(mid);
+ mutex_unlock(&server->srv_mutex);
add_credits(tcon->ses->server, credits_received, 0);
}
}
spin_unlock(&GlobalMid_Lock);
+ mutex_lock(&server->srv_mutex);
DeleteMidQEntry(mid);
+ mutex_unlock(&server->srv_mutex);
return rc;
}
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
+#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
unsigned long pmd_addr = address & PMD_MASK;
bool write = flags & FAULT_FLAG_WRITE;
long length;
- void *kaddr;
+ void __pmem *kaddr;
pgoff_t size, pgoff;
sector_t block, sector;
unsigned long pfn;
if (buffer_unwritten(&bh) || buffer_new(&bh)) {
int i;
for (i = 0; i < PTRS_PER_PMD; i++)
- clear_page(kaddr + i * PAGE_SIZE);
+ clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
+ wmb_pmem();
count_vm_event(PGMAJFAULT);
mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
result |= VM_FAULT_MAJOR;
if (oldcount == 0) {
result = ufs_alloc_fragments (inode, cgno, goal, count, err);
if (result) {
+ ufs_clear_frags(inode, result + oldcount,
+ newcount - oldcount, locked_page != NULL);
write_seqlock(&UFS_I(inode)->meta_lock);
ufs_cpu_to_data_ptr(sb, p, result);
write_sequnlock(&UFS_I(inode)->meta_lock);
*err = 0;
UFS_I(inode)->i_lastfrag =
max(UFS_I(inode)->i_lastfrag, fragment + count);
- ufs_clear_frags(inode, result + oldcount,
- newcount - oldcount, locked_page != NULL);
}
mutex_unlock(&UFS_SB(sb)->s_lock);
UFSD("EXIT, result %llu\n", (unsigned long long)result);
/* Timer IRQ */
const struct kvm_irq_level *irq;
+
+ /* VGIC mapping */
+ struct irq_phys_map *map;
};
int kvm_timer_hyp_init(void);
void kvm_timer_enable(struct kvm *kvm);
void kvm_timer_init(struct kvm *kvm);
-void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
- const struct kvm_irq_level *irq);
+int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
+ const struct kvm_irq_level *irq);
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu);
#define LR_STATE_ACTIVE (1 << 1)
#define LR_STATE_MASK (3 << 0)
#define LR_EOI_INT (1 << 2)
+#define LR_HW (1 << 3)
struct vgic_lr {
- u16 irq;
- u8 source;
- u8 state;
+ unsigned irq:10;
+ union {
+ unsigned hwirq:10;
+ unsigned source:3;
+ };
+ unsigned state:4;
};
struct vgic_vmcr {
struct kvm_io_device dev;
};
+struct irq_phys_map {
+ u32 virt_irq;
+ u32 phys_irq;
+ u32 irq;
+ bool active;
+};
+
+struct irq_phys_map_entry {
+ struct list_head entry;
+ struct rcu_head rcu;
+ struct irq_phys_map map;
+};
+
struct vgic_dist {
spinlock_t lock;
bool in_kernel;
struct vgic_vm_ops vm_ops;
struct vgic_io_device dist_iodev;
struct vgic_io_device *redist_iodevs;
+
+ /* Virtual irq to hwirq mapping */
+ spinlock_t irq_phys_map_lock;
+ struct list_head irq_phys_map_list;
};
struct vgic_v2_cpu_if {
struct vgic_v2_cpu_if vgic_v2;
struct vgic_v3_cpu_if vgic_v3;
};
+
+ /* Protected by the distributor's irq_phys_map_lock */
+ struct list_head irq_phys_map_list;
};
#define LR_EMPTY 0xff
int kvm_vgic_hyp_init(void);
int kvm_vgic_map_resources(struct kvm *kvm);
int kvm_vgic_get_max_vcpus(void);
+void kvm_vgic_early_init(struct kvm *kvm);
int kvm_vgic_create(struct kvm *kvm, u32 type);
void kvm_vgic_destroy(struct kvm *kvm);
+void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu);
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
bool level);
+int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
+ struct irq_phys_map *map, bool level);
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu);
+struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
+ int virt_irq, int irq);
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
+bool kvm_vgic_get_phys_irq_active(struct irq_phys_map *map);
+void kvm_vgic_set_phys_irq_active(struct irq_phys_map *map, bool active);
#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus))
#define UART01x_DR 0x00 /* Data read or written from the interface. */
#define UART01x_RSR 0x04 /* Receive status register (Read). */
#define UART01x_ECR 0x04 /* Error clear register (Write). */
-#define ZX_UART01x_DR 0x04 /* Data read or written from the interface. */
#define UART010_LCRH 0x08 /* Line control register, high byte. */
#define ST_UART011_DMAWM 0x08 /* DMA watermark configure register. */
#define UART010_LCRM 0x0C /* Line control register, middle byte. */
#define ST_UART011_TIMEOUT 0x0C /* Timeout period register. */
#define UART010_LCRL 0x10 /* Line control register, low byte. */
#define UART010_CR 0x14 /* Control register. */
-#define ZX_UART01x_FR 0x14 /* Flag register (Read only). */
#define UART01x_FR 0x18 /* Flag register (Read only). */
#define UART010_IIR 0x1C /* Interrupt identification register (Read). */
#define UART010_ICR 0x1C /* Interrupt clear register (Write). */
#define UART011_LCRH 0x2c /* Line control register. */
#define ST_UART011_LCRH_TX 0x2c /* Tx Line control register. */
#define UART011_CR 0x30 /* Control register. */
-#define ZX_UART011_LCRH_TX 0x30 /* Tx Line control register. */
#define UART011_IFLS 0x34 /* Interrupt fifo level select. */
-#define ZX_UART011_CR 0x34 /* Control register. */
-#define ZX_UART011_IFLS 0x38 /* Interrupt fifo level select. */
#define UART011_IMSC 0x38 /* Interrupt mask. */
#define UART011_RIS 0x3c /* Raw interrupt status. */
#define UART011_MIS 0x40 /* Masked interrupt status. */
-#define ZX_UART011_IMSC 0x40 /* Interrupt mask. */
#define UART011_ICR 0x44 /* Interrupt clear register. */
-#define ZX_UART011_RIS 0x44 /* Raw interrupt status. */
#define UART011_DMACR 0x48 /* DMA control register. */
-#define ZX_UART011_MIS 0x48 /* Masked interrupt status. */
-#define ZX_UART011_ICR 0x4c /* Interrupt clear register. */
#define ST_UART011_XFCR 0x50 /* XON/XOFF control register. */
-#define ZX_UART011_DMACR 0x50 /* DMA control register. */
#define ST_UART011_XON1 0x54 /* XON1 register. */
#define ST_UART011_XON2 0x58 /* XON2 register. */
#define ST_UART011_XOFF1 0x5C /* XON1 register. */
#define UART01x_RSR_PE 0x02
#define UART01x_RSR_FE 0x01
-#define ZX_UART01x_FR_BUSY 0x300
#define UART011_FR_RI 0x100
#define UART011_FR_TXFE 0x080
#define UART011_FR_RXFF 0x040
#define UART01x_FR_TXFF 0x020
#define UART01x_FR_RXFE 0x010
#define UART01x_FR_BUSY 0x008
-#define ZX_UART01x_FR_DSR 0x008
#define UART01x_FR_DCD 0x004
#define UART01x_FR_DSR 0x002
-#define ZX_UART01x_FR_CTS 0x002
#define UART01x_FR_CTS 0x001
-#define ZX_UART011_FR_RI 0x001
#define UART01x_FR_TMSK (UART01x_FR_TXFF + UART01x_FR_BUSY)
#define UART011_CR_CTSEN 0x8000 /* CTS hardware flow control */
#define ICH_LR_EOI (1UL << 41)
#define ICH_LR_GROUP (1UL << 60)
+#define ICH_LR_HW (1UL << 61)
#define ICH_LR_STATE (3UL << 62)
#define ICH_LR_PENDING_BIT (1UL << 62)
#define ICH_LR_ACTIVE_BIT (1UL << 63)
+#define ICH_LR_PHYS_ID_SHIFT 32
+#define ICH_LR_PHYS_ID_MASK (0x3ffUL << ICH_LR_PHYS_ID_SHIFT)
#define ICH_MISR_EOI (1 << 0)
#define ICH_MISR_U (1 << 1)
#define GICH_LR_VIRTUALID (0x3ff << 0)
#define GICH_LR_PHYSID_CPUID_SHIFT (10)
-#define GICH_LR_PHYSID_CPUID (7 << GICH_LR_PHYSID_CPUID_SHIFT)
+#define GICH_LR_PHYSID_CPUID (0x3ff << GICH_LR_PHYSID_CPUID_SHIFT)
#define GICH_LR_STATE (3 << 28)
#define GICH_LR_PENDING_BIT (1 << 28)
#define GICH_LR_ACTIVE_BIT (1 << 29)
#define GICH_LR_EOI (1 << 19)
+#define GICH_LR_HW (1 << 31)
#define GICH_VMCR_CTRL_SHIFT 0
#define GICH_VMCR_CTRL_MASK (0x21f << GICH_VMCR_CTRL_SHIFT)
int sigset_active;
sigset_t sigset;
struct kvm_vcpu_stat stat;
+ unsigned int halt_poll_ns;
#ifdef CONFIG_HAS_IOMEM
int mmio_needed;
--- /dev/null
+/*
+ * Copyright (C) 2015 Microchip Technology
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _MICROCHIPPHY_H
+#define _MICROCHIPPHY_H
+
+#define LAN88XX_INT_MASK (0x19)
+#define LAN88XX_INT_MASK_MDINTPIN_EN_ (0x8000)
+#define LAN88XX_INT_MASK_SPEED_CHANGE_ (0x4000)
+#define LAN88XX_INT_MASK_LINK_CHANGE_ (0x2000)
+#define LAN88XX_INT_MASK_FDX_CHANGE_ (0x1000)
+#define LAN88XX_INT_MASK_AUTONEG_ERR_ (0x0800)
+#define LAN88XX_INT_MASK_AUTONEG_DONE_ (0x0400)
+#define LAN88XX_INT_MASK_POE_DETECT_ (0x0200)
+#define LAN88XX_INT_MASK_SYMBOL_ERR_ (0x0100)
+#define LAN88XX_INT_MASK_FAST_LINK_FAIL_ (0x0080)
+#define LAN88XX_INT_MASK_WOL_EVENT_ (0x0040)
+#define LAN88XX_INT_MASK_EXTENDED_INT_ (0x0020)
+#define LAN88XX_INT_MASK_RESERVED_ (0x0010)
+#define LAN88XX_INT_MASK_FALSE_CARRIER_ (0x0008)
+#define LAN88XX_INT_MASK_LINK_SPEED_DS_ (0x0004)
+#define LAN88XX_INT_MASK_MASTER_SLAVE_DONE_ (0x0002)
+#define LAN88XX_INT_MASK_RX__ER_ (0x0001)
+
+#define LAN88XX_INT_STS (0x1A)
+#define LAN88XX_INT_STS_INT_ACTIVE_ (0x8000)
+#define LAN88XX_INT_STS_SPEED_CHANGE_ (0x4000)
+#define LAN88XX_INT_STS_LINK_CHANGE_ (0x2000)
+#define LAN88XX_INT_STS_FDX_CHANGE_ (0x1000)
+#define LAN88XX_INT_STS_AUTONEG_ERR_ (0x0800)
+#define LAN88XX_INT_STS_AUTONEG_DONE_ (0x0400)
+#define LAN88XX_INT_STS_POE_DETECT_ (0x0200)
+#define LAN88XX_INT_STS_SYMBOL_ERR_ (0x0100)
+#define LAN88XX_INT_STS_FAST_LINK_FAIL_ (0x0080)
+#define LAN88XX_INT_STS_WOL_EVENT_ (0x0040)
+#define LAN88XX_INT_STS_EXTENDED_INT_ (0x0020)
+#define LAN88XX_INT_STS_RESERVED_ (0x0010)
+#define LAN88XX_INT_STS_FALSE_CARRIER_ (0x0008)
+#define LAN88XX_INT_STS_LINK_SPEED_DS_ (0x0004)
+#define LAN88XX_INT_STS_MASTER_SLAVE_DONE_ (0x0002)
+#define LAN88XX_INT_STS_RX_ER_ (0x0001)
+
+#define LAN88XX_EXT_PAGE_ACCESS (0x1F)
+#define LAN88XX_EXT_PAGE_SPACE_0 (0x0000)
+#define LAN88XX_EXT_PAGE_SPACE_1 (0x0001)
+#define LAN88XX_EXT_PAGE_SPACE_2 (0x0002)
+
+/* Extended Register Page 1 space */
+#define LAN88XX_EXT_MODE_CTRL (0x13)
+#define LAN88XX_EXT_MODE_CTRL_MDIX_MASK_ (0x000C)
+#define LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_ (0x0000)
+#define LAN88XX_EXT_MODE_CTRL_MDI_ (0x0008)
+#define LAN88XX_EXT_MODE_CTRL_MDI_X_ (0x000C)
+
+/* MMD 3 Registers */
+#define LAN88XX_MMD3_CHIP_ID (32877)
+#define LAN88XX_MMD3_CHIP_REV (32878)
+
+#endif /* _MICROCHIPPHY_H */
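The LAN88XX_EXT_PAGE_* constants follow the usual paged-PHY pattern: write the
page number to register 0x1F, access the paged register, then restore page 0.
A hedged phylib sketch using the standard phy_read()/phy_write() accessors;
the forced-MDI helper is our illustration, not a function from this header,
and error handling is elided:

	#include <linux/phy.h>
	#include <linux/microchipphy.h>

	static int lan88xx_force_mdi(struct phy_device *phydev)
	{
		int val;

		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
		val = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
		val &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
		val |= LAN88XX_EXT_MODE_CTRL_MDI_;	/* force MDI */
		phy_write(phydev, LAN88XX_EXT_MODE_CTRL, val);
		return phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
				 LAN88XX_EXT_PAGE_SPACE_0);
	}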
extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
extern int netlink_has_listeners(struct sock *sk, unsigned int group);
-extern struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
- u32 dst_portid, gfp_t gfp_mask);
+
+extern struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size,
+ unsigned int ldiff, u32 dst_portid,
+ gfp_t gfp_mask);
+static inline struct sk_buff *
+netlink_alloc_skb(struct sock *ssk, unsigned int size, u32 dst_portid,
+ gfp_t gfp_mask)
+{
+ return __netlink_alloc_skb(ssk, size, 0, dst_portid, gfp_mask);
+}
+
extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
__u32 group, gfp_t allocation);
PWMF_EXPORTED = 1 << 2,
};
+/**
+ * struct pwm_device - PWM channel object
+ * @label: name of the PWM device
+ * @flags: flags associated with the PWM device
+ * @hwpwm: per-chip relative index of the PWM device
+ * @pwm: global index of the PWM device
+ * @chip: PWM chip providing this PWM device
+ * @chip_data: chip-private data associated with the PWM device
+ * @period: period of the PWM signal (in nanoseconds)
+ * @duty_cycle: duty cycle of the PWM signal (in nanoseconds)
+ * @polarity: polarity of the PWM signal
+ */
struct pwm_device {
- const char *label;
- unsigned long flags;
- unsigned int hwpwm;
- unsigned int pwm;
- struct pwm_chip *chip;
- void *chip_data;
-
- unsigned int period; /* in nanoseconds */
- unsigned int duty_cycle; /* in nanoseconds */
- enum pwm_polarity polarity;
+ const char *label;
+ unsigned long flags;
+ unsigned int hwpwm;
+ unsigned int pwm;
+ struct pwm_chip *chip;
+ void *chip_data;
+
+ unsigned int period;
+ unsigned int duty_cycle;
+ enum pwm_polarity polarity;
};
+static inline bool pwm_is_enabled(const struct pwm_device *pwm)
+{
+ return test_bit(PWMF_ENABLED, &pwm->flags);
+}
+
static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period)
{
if (pwm)
pwm->period = period;
}
-static inline unsigned int pwm_get_period(struct pwm_device *pwm)
+static inline unsigned int pwm_get_period(const struct pwm_device *pwm)
{
return pwm ? pwm->period : 0;
}
pwm->duty_cycle = duty;
}
-static inline unsigned int pwm_get_duty_cycle(struct pwm_device *pwm)
+static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm)
{
return pwm ? pwm->duty_cycle : 0;
}
*/
int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity);
+static inline enum pwm_polarity pwm_get_polarity(const struct pwm_device *pwm)
+{
+ return pwm ? pwm->polarity : PWM_POLARITY_NORMAL;
+}
+
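Together with the existing consumer API (pwm_get(), pwm_config(),
pwm_enable(), pwm_put()), the accessors above let consumers read back cached
state without poking at struct pwm_device directly. A hedged consumer sketch
assuming <linux/pwm.h>; the "backlight" con_id and the 50% duty choice are
purely illustrative:

	static int backlight_dim_half(struct device *dev)
	{
		struct pwm_device *pwm = pwm_get(dev, "backlight");
		unsigned int period;
		int ret;

		if (IS_ERR(pwm))
			return PTR_ERR(pwm);

		period = pwm_get_period(pwm);
		ret = pwm_config(pwm, period / 2, period);	/* 50% duty */
		if (!ret && !pwm_is_enabled(pwm))
			ret = pwm_enable(pwm);
		if (ret)
			pwm_put(pwm);	/* a real consumer keeps the reference while in use */
		return ret;
	}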
/**
* struct pwm_ops - PWM controller operations
* @request: optional hook for requesting a PWM
* @owner: helps prevent removal of modules exporting active PWMs
*/
struct pwm_ops {
- int (*request)(struct pwm_chip *chip,
- struct pwm_device *pwm);
- void (*free)(struct pwm_chip *chip,
- struct pwm_device *pwm);
- int (*config)(struct pwm_chip *chip,
- struct pwm_device *pwm,
- int duty_ns, int period_ns);
- int (*set_polarity)(struct pwm_chip *chip,
- struct pwm_device *pwm,
- enum pwm_polarity polarity);
- int (*enable)(struct pwm_chip *chip,
- struct pwm_device *pwm);
- void (*disable)(struct pwm_chip *chip,
- struct pwm_device *pwm);
+ int (*request)(struct pwm_chip *chip, struct pwm_device *pwm);
+ void (*free)(struct pwm_chip *chip, struct pwm_device *pwm);
+ int (*config)(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns);
+ int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm,
+ enum pwm_polarity polarity);
+ int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm);
+ void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm);
#ifdef CONFIG_DEBUG_FS
- void (*dbg_show)(struct pwm_chip *chip,
- struct seq_file *s);
+ void (*dbg_show)(struct pwm_chip *chip, struct seq_file *s);
#endif
- struct module *owner;
+ struct module *owner;
};
/**
* @base: number of first PWM controlled by this chip
* @npwm: number of PWMs controlled by this chip
* @pwms: array of PWM devices allocated by the framework
+ * @of_xlate: request a PWM device given a device tree PWM specifier
+ * @of_pwm_n_cells: number of cells expected in the device tree PWM specifier
* @can_sleep: must be true if the .config(), .enable() or .disable()
* operations may sleep
*/
struct pwm_chip {
- struct device *dev;
- struct list_head list;
- const struct pwm_ops *ops;
- int base;
- unsigned int npwm;
-
- struct pwm_device *pwms;
-
- struct pwm_device * (*of_xlate)(struct pwm_chip *pc,
- const struct of_phandle_args *args);
- unsigned int of_pwm_n_cells;
- bool can_sleep;
+ struct device *dev;
+ struct list_head list;
+ const struct pwm_ops *ops;
+ int base;
+ unsigned int npwm;
+
+ struct pwm_device *pwms;
+
+ struct pwm_device * (*of_xlate)(struct pwm_chip *pc,
+ const struct of_phandle_args *args);
+ unsigned int of_pwm_n_cells;
+ bool can_sleep;
};
#if IS_ENABLED(CONFIG_PWM)
return -ENOSYS;
}
+static inline struct reset_control *__must_check reset_control_get(
+ struct device *dev, const char *id)
+{
+ WARN_ON(1);
+ return ERR_PTR(-EINVAL);
+}
+
+static inline struct reset_control *__must_check devm_reset_control_get(
+ struct device *dev, const char *id)
+{
+ WARN_ON(1);
+ return ERR_PTR(-EINVAL);
+}
+
static inline struct reset_control *reset_control_get_optional(
struct device *dev, const char *id)
{
struct nlattr **);
int (*fill)(struct fib_rule *, struct sk_buff *,
struct fib_rule_hdr *);
- u32 (*default_pref)(struct fib_rules_ops *ops);
size_t (*nlmsg_payload)(struct fib_rule *);
/* Called after modifications to the rules set, must flush
struct fib_lookup_arg *);
int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table,
u32 flags);
-u32 fib_default_rule_pref(struct fib_rules_ops *ops);
#endif
* @chandef: Channel definition for this BSS -- the hardware might be
 * configured at a higher bandwidth than this BSS uses, for example.
* @ht_operation_mode: HT operation mode like in &struct ieee80211_ht_operation.
- * This field is only valid when the channel type is one of the HT types.
+ * This field is only valid when the channel is a wide HT/VHT channel.
+ * Note that with TDLS this can be the case (channel is HT, protection must
+ * be used from this field) even when the BSS association isn't using HT.
* @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value
* implies disabled
* @cqm_rssi_hyst: Connection quality monitor RSSI hysteresis
br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct nf_hook_state *state)
{
- return NF_DROP;
+ return NF_ACCEPT;
}
#endif
struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
const struct nf_conntrack_zone *zone,
gfp_t flags);
+void nf_ct_tmpl_free(struct nf_conn *tmpl);
#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
{
- return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1;
+ return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE;
}
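Here the legacy 128-bit register number is converted to the new 32-bit
addressing: assuming the uapi values NFT_REG_SIZE = 16 and NFT_REG32_SIZE = 4,
each legacy register spans four 32-bit slots, so NFT_REG_1 maps to slot 4
while NFT_REG_VERDICT stays at slot 0.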
unsigned int nft_parse_register(const struct nlattr *attr);
#endif
+TRACE_EVENT(kvm_halt_poll_ns,
+ TP_PROTO(bool grow, unsigned int vcpu_id, int new, int old),
+ TP_ARGS(grow, vcpu_id, new, old),
+
+ TP_STRUCT__entry(
+ __field(bool, grow)
+ __field(unsigned int, vcpu_id)
+ __field(int, new)
+ __field(int, old)
+ ),
+
+ TP_fast_assign(
+ __entry->grow = grow;
+ __entry->vcpu_id = vcpu_id;
+ __entry->new = new;
+ __entry->old = old;
+ ),
+
+ TP_printk("vcpu %u: halt_poll_ns %d (%s %d)",
+ __entry->vcpu_id,
+ __entry->new,
+ __entry->grow ? "grow" : "shrink",
+ __entry->old)
+);
+
+#define trace_kvm_halt_poll_ns_grow(vcpu_id, new, old) \
+ trace_kvm_halt_poll_ns(true, vcpu_id, new, old)
+#define trace_kvm_halt_poll_ns_shrink(vcpu_id, new, old) \
+ trace_kvm_halt_poll_ns(false, vcpu_id, new, old)
+
#endif /* _TRACE_KVM_MAIN_H */
/* This part must be outside protection */
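A sketch of how the wrapper macros pair with the halt_poll_ns field added to
struct kvm_vcpu above; the doubling growth policy and the 10 us floor are
assumptions for illustration, not the tuned values:

	static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
	{
		int old = vcpu->halt_poll_ns;
		int new = old ? old * 2 : 10000;	/* illustrative policy */

		vcpu->halt_poll_ns = new;
		trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, new, old);
	}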
#define EM_TI_C6000 140 /* TI C6X DSPs */
#define EM_AARCH64 183 /* ARM 64 bit */
#define EM_TILEPRO 188 /* Tilera TILEPro */
+#define EM_MICROBLAZE 189 /* Xilinx MicroBlaze */
#define EM_TILEGX 191 /* Tilera TILE-Gx */
#define EM_FRV 0x5441 /* Fujitsu FR-V */
#define EM_AVR32 0x18ad /* Atmel AVR32 */
#define ETH_P_LOOP 0x0060 /* Ethernet Loopback packet */
#define ETH_P_PUP 0x0200 /* Xerox PUP packet */
#define ETH_P_PUPAT 0x0201 /* Xerox PUP Addr Trans packet */
+#define ETH_P_TSN 0x22F0 /* TSN (IEEE 1722) packet */
#define ETH_P_IP 0x0800 /* Internet Protocol packet */
#define ETH_P_X25 0x0805 /* CCITT X.25 */
#define ETH_P_ARP 0x0806 /* Address Resolution packet */
__u32 count;
__u64 data_offset; /* relative to kvm_run start */
} io;
+ /* KVM_EXIT_DEBUG */
struct {
struct kvm_debug_exit_arch arch;
} debug;
__u32 data;
__u8 is_write;
} dcr;
+ /* KVM_EXIT_INTERNAL_ERROR */
struct {
__u32 suberror;
/* Available with KVM_CAP_INTERNAL_ERROR_DATA: */
struct {
__u64 gprs[32];
} osi;
+ /* KVM_EXIT_PAPR_HCALL */
struct {
__u64 nr;
__u64 ret;
#define KVM_CAP_DISABLE_QUIRKS 116
#define KVM_CAP_X86_SMM 117
#define KVM_CAP_MULTI_ADDRESS_SPACE 118
+#define KVM_CAP_GUEST_DEBUG_HW_BPS 119
+#define KVM_CAP_GUEST_DEBUG_HW_WPS 120
#ifdef KVM_CAP_IRQ_ROUTING
struct privcmd_mmap_entry {
__u64 va;
+ /*
+ * This should be a GFN. It's not possible to change the name because
+	 * it's exposed to user-space.
+ */
__u64 mfn;
__u64 npages;
};
#include <asm/xen/page.h>
-static inline unsigned long page_to_mfn(struct page *page)
+static inline unsigned long xen_page_to_gfn(struct page *page)
{
- return pfn_to_mfn(page_to_pfn(page));
+ return pfn_to_gfn(page_to_pfn(page));
}
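The mfn-to-gfn/bfn renames above follow Xen's frame-number terminology; as a
gloss (ours, not the patches'): an MFN is a machine frame number (a real host
frame), a GFN is a guest frame number (guest-physical address space), and a
BFN is a bus frame number as seen by a device behind an IOMMU. On
auto-translated guests (ARM, x86 HVM) pfn_to_gfn() is an identity mapping,
which is what lets helpers such as xen_page_to_gfn() be used unconditionally,
e.g.:

	/* grant another domain access to a page by its GFN */
	int ref = gnttab_grant_foreign_access(domid, xen_page_to_gfn(page), 0);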
struct xen_memory_region {
struct vm_area_struct;
/*
- * xen_remap_domain_mfn_array() - map an array of foreign frames
+ * xen_remap_domain_gfn_array() - map an array of foreign frames
* @vma: VMA to map the pages into
* @addr: Address at which to map the pages
* @gfn: Array of GFNs to map
* Returns the number of successfully mapped frames, or a -ve error
* code.
*/
-int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
unsigned long addr,
xen_pfn_t *gfn, int nr,
int *err_ptr, pgprot_t prot,
unsigned domid,
struct page **pages);
-/* xen_remap_domain_mfn_range() - map a range of foreign frames
+/* xen_remap_domain_gfn_range() - map a range of foreign frames
* @vma: VMA to map the pages into
* @addr: Address at which to map the pages
* @gfn: First GFN to map.
* Returns the number of successfully mapped frames, or a -ve error
* code.
*/
-int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
unsigned long addr,
xen_pfn_t gfn, int nr,
pgprot_t prot, unsigned domid,
struct page **pages);
-int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
int numpgs, struct page **pages);
int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
unsigned long addr,
void __user *ukey = u64_to_ptr(attr->key);
void __user *uvalue = u64_to_ptr(attr->value);
int ufd = attr->map_fd;
- struct fd f = fdget(ufd);
struct bpf_map *map;
void *key, *value, *ptr;
+ struct fd f;
int err;
if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
return -EINVAL;
+ f = fdget(ufd);
map = bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
void __user *ukey = u64_to_ptr(attr->key);
void __user *uvalue = u64_to_ptr(attr->value);
int ufd = attr->map_fd;
- struct fd f = fdget(ufd);
struct bpf_map *map;
void *key, *value;
+ struct fd f;
int err;
if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
return -EINVAL;
+ f = fdget(ufd);
map = bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
{
void __user *ukey = u64_to_ptr(attr->key);
int ufd = attr->map_fd;
- struct fd f = fdget(ufd);
struct bpf_map *map;
+ struct fd f;
void *key;
int err;
if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
return -EINVAL;
+ f = fdget(ufd);
map = bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
void __user *ukey = u64_to_ptr(attr->key);
void __user *unext_key = u64_to_ptr(attr->next_key);
int ufd = attr->map_fd;
- struct fd f = fdget(ufd);
struct bpf_map *map;
void *key, *next_key;
+ struct fd f;
int err;
if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
return -EINVAL;
+ f = fdget(ufd);
map = bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
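The four fdget() moves above share a single rationale: fdget() takes a
reference on the file, and acquiring it before the CHECK_ATTR() validation
meant the reference was leaked on the early -EINVAL return. Deferring fdget()
until after validation closes that leak in all four map syscall handlers.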
[BPF_ALU64] = "alu64",
};
-static const char *const bpf_alu_string[] = {
+static const char *const bpf_alu_string[16] = {
[BPF_ADD >> 4] = "+=",
[BPF_SUB >> 4] = "-=",
[BPF_MUL >> 4] = "*=",
[BPF_DW >> 3] = "u64",
};
-static const char *const bpf_jmp_string[] = {
+static const char *const bpf_jmp_string[16] = {
[BPF_JA >> 4] = "jmp",
[BPF_JEQ >> 4] = "==",
[BPF_JGT >> 4] = ">",
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
-#include <net/switchdev.h>
#include <uapi/linux/if_bridge.h>
#include "br_private.h"
return err;
}
-static void __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
- u16 vid)
+static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
+ u16 vid)
{
const struct net_device_ops *ops = dev->netdev_ops;
+ int err = 0;
/* If driver uses VLAN ndo ops, use 8021q to delete vid
* on device, otherwise try switchdev ops to delete vid.
},
};
- switchdev_port_obj_del(dev, &vlan_obj);
+ err = switchdev_port_obj_del(dev, &vlan_obj);
+ if (err == -EOPNOTSUPP)
+ err = 0;
}
+
+ return err;
}
static int __vlan_del(struct net_port_vlans *v, u16 vid)
if (v->port_idx) {
struct net_bridge_port *p = v->parent.port;
- __vlan_vid_del(p->dev, p->br, vid);
+ int err;
+
+ err = __vlan_vid_del(p->dev, p->br, vid);
+ if (err)
+ return err;
}
clear_bit(vid, v->vlan_bitmap);
}
EXPORT_SYMBOL(fib_default_rule_add);
-u32 fib_default_rule_pref(struct fib_rules_ops *ops)
+static u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
struct list_head *pos;
struct fib_rule *rule;
return 0;
}
-EXPORT_SYMBOL(fib_default_rule_pref);
static void notify_rule_change(int event, struct fib_rule *rule,
struct fib_rules_ops *ops, struct nlmsghdr *nlh,
}
rule->fr_net = net;
- if (tb[FRA_PRIORITY])
- rule->pref = nla_get_u32(tb[FRA_PRIORITY]);
+ rule->pref = tb[FRA_PRIORITY] ? nla_get_u32(tb[FRA_PRIORITY])
+ : fib_default_rule_pref(ops);
if (tb[FRA_IIFNAME]) {
struct net_device *dev;
else
rule->suppress_ifgroup = -1;
- if (!tb[FRA_PRIORITY] && ops->default_pref)
- rule->pref = ops->default_pref(ops);
-
err = -EINVAL;
if (tb[FRA_GOTO]) {
if (rule->action != FR_ACT_GOTO)
.configure = dn_fib_rule_configure,
.compare = dn_fib_rule_compare,
.fill = dn_fib_rule_fill,
- .default_pref = fib_default_rule_pref,
.flush_cache = dn_fib_rule_flush_cache,
.nlgroup = RTNLGRP_DECnet_RULE,
.policy = dn_fib_rule_policy,
.delete = fib4_rule_delete,
.compare = fib4_rule_compare,
.fill = fib4_rule_fill,
- .default_pref = fib_default_rule_pref,
.nlmsg_payload = fib4_rule_nlmsg_payload,
.flush_cache = fib4_rule_flush_cache,
.nlgroup = RTNLGRP_IPV4_RULE,
.match = ipmr_rule_match,
.configure = ipmr_rule_configure,
.compare = ipmr_rule_compare,
- .default_pref = fib_default_rule_pref,
.fill = ipmr_rule_fill,
.nlgroup = RTNLGRP_IPV4_RULE,
.policy = ipmr_rule_policy,
tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
}
+static void bictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event)
+{
+ if (event == CA_EVENT_TX_START) {
+ s32 delta = tcp_time_stamp - tcp_sk(sk)->lsndtime;
+ struct bictcp *ca = inet_csk_ca(sk);
+
+ /* We were application limited (idle) for a while.
+	 * Shift epoch_start to keep cwnd growth on the cubic curve.
+ */
+ if (ca->epoch_start && delta > 0)
+ ca->epoch_start += delta;
+ return;
+ }
+}
+
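For reference (our summary of CUBIC, not part of the patch): the window
follows W(t) = C*(t - K)^3 + W_max, where t is the time since epoch_start.
During an application-limited idle period t keeps advancing while nothing is
sent, so on restart cwnd would jump ahead along the curve; adding the idle
time (delta = now - lsndtime) to epoch_start keeps t equal to the time
actually spent sending.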
/* calculate the cubic root of x using a table lookup followed by one
* Newton-Raphson iteration.
* Avg err ~= 0.195%
.cong_avoid = bictcp_cong_avoid,
.set_state = bictcp_state,
.undo_cwnd = bictcp_undo_cwnd,
+ .cwnd_event = bictcp_cwnd_event,
.pkts_acked = bictcp_acked,
.owner = THIS_MODULE,
.name = "cubic",
struct inet_connection_sock *icsk = inet_csk(sk);
const u32 now = tcp_time_stamp;
+ if (tcp_packets_in_flight(tp) == 0)
+ tcp_ca_event(sk, CA_EVENT_TX_START);
+
tp->lsndtime = now;
/* If it is a reply for ato after last received
&md5);
tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
- if (tcp_packets_in_flight(tp) == 0)
- tcp_ca_event(sk, CA_EVENT_TX_START);
-
/* if no packet is in qdisc/device queue, then allow XPS to select
* another queue. We can be called from tcp_tsq_handler()
* which holds one reference to sk_wmem_alloc.
return -ENOBUFS;
}
-static u32 fib6_rule_default_pref(struct fib_rules_ops *ops)
-{
- return 0x3FFF;
-}
-
static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule)
{
return nla_total_size(16) /* dst */
.configure = fib6_rule_configure,
.compare = fib6_rule_compare,
.fill = fib6_rule_fill,
- .default_pref = fib6_rule_default_pref,
.nlmsg_payload = fib6_rule_nlmsg_payload,
.nlgroup = RTNLGRP_IPV6_RULE,
.policy = fib6_rule_policy,
.match = ip6mr_rule_match,
.configure = ip6mr_rule_configure,
.compare = ip6mr_rule_compare,
- .default_pref = fib_default_rule_pref,
.fill = ip6mr_rule_fill,
.nlgroup = RTNLGRP_IPV6_RULE,
.policy = ip6mr_rule_policy,
if (it->cache == &mrt->mfc6_unres_queue)
spin_unlock_bh(&mfc_unres_lock);
- else if (it->cache == mrt->mfc6_cache_array)
+ else if (it->cache == &mrt->mfc6_cache_array[it->ct])
read_unlock(&mrt_lock);
}
return -EINVAL;
}
-int ip6_route_add(struct fib6_config *cfg)
+int ip6_route_info_create(struct fib6_config *cfg, struct rt6_info **rt_ret)
{
int err;
struct net *net = cfg->fc_nlinfo.nl_net;
struct net_device *dev = NULL;
struct inet6_dev *idev = NULL;
struct fib6_table *table;
- struct mx6_config mxc = { .mx = NULL, };
int addr_type;
if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
cfg->fc_nlinfo.nl_net = dev_net(dev);
+ *rt_ret = rt;
+
+ return 0;
+out:
+ if (dev)
+ dev_put(dev);
+ if (idev)
+ in6_dev_put(idev);
+ if (rt)
+ dst_free(&rt->dst);
+
+ *rt_ret = NULL;
+
+ return err;
+}
+
+int ip6_route_add(struct fib6_config *cfg)
+{
+ struct mx6_config mxc = { .mx = NULL, };
+ struct rt6_info *rt = NULL;
+ int err;
+
+ err = ip6_route_info_create(cfg, &rt);
+ if (err)
+ goto out;
+
err = ip6_convert_metrics(&mxc, cfg);
if (err)
goto out;
err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc);
kfree(mxc.mx);
+
return err;
out:
- if (dev)
- dev_put(dev);
- if (idev)
- in6_dev_put(idev);
if (rt)
dst_free(&rt->dst);
+
return err;
}
return err;
}
-static int ip6_route_multipath(struct fib6_config *cfg, int add)
+struct rt6_nh {
+ struct rt6_info *rt6_info;
+ struct fib6_config r_cfg;
+ struct mx6_config mxc;
+ struct list_head next;
+};
+
+static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
+{
+ struct rt6_nh *nh;
+
+ list_for_each_entry(nh, rt6_nh_list, next) {
+ pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6 nexthop %pI6 ifi %d\n",
+ &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
+ nh->r_cfg.fc_ifindex);
+ }
+}
+
+static int ip6_route_info_append(struct list_head *rt6_nh_list,
+ struct rt6_info *rt, struct fib6_config *r_cfg)
+{
+ struct rt6_nh *nh;
+ struct rt6_info *rtnh;
+ int err = -EEXIST;
+
+ list_for_each_entry(nh, rt6_nh_list, next) {
+ /* check if rt6_info already exists */
+ rtnh = nh->rt6_info;
+
+ if (rtnh->dst.dev == rt->dst.dev &&
+ rtnh->rt6i_idev == rt->rt6i_idev &&
+ ipv6_addr_equal(&rtnh->rt6i_gateway,
+ &rt->rt6i_gateway))
+ return err;
+ }
+
+ nh = kzalloc(sizeof(*nh), GFP_KERNEL);
+ if (!nh)
+ return -ENOMEM;
+ nh->rt6_info = rt;
+ err = ip6_convert_metrics(&nh->mxc, r_cfg);
+ if (err) {
+ kfree(nh);
+ return err;
+ }
+ memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
+ list_add_tail(&nh->next, rt6_nh_list);
+
+ return 0;
+}
+
+static int ip6_route_multipath_add(struct fib6_config *cfg)
{
struct fib6_config r_cfg;
struct rtnexthop *rtnh;
+ struct rt6_info *rt;
+ struct rt6_nh *err_nh;
+ struct rt6_nh *nh, *nh_safe;
int remaining;
int attrlen;
- int err = 0, last_err = 0;
+ int err = 1;
+ int nhn = 0;
+ int replace = (cfg->fc_nlinfo.nlh &&
+ (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
+ LIST_HEAD(rt6_nh_list);
remaining = cfg->fc_mp_len;
-beginning:
rtnh = (struct rtnexthop *)cfg->fc_mp;
- /* Parse a Multipath Entry */
+ /* Parse a Multipath Entry and build a list (rt6_nh_list) of
+ * rt6_info structs per nexthop
+ */
while (rtnh_ok(rtnh, remaining)) {
memcpy(&r_cfg, cfg, sizeof(*cfg));
if (rtnh->rtnh_ifindex)
if (nla)
r_cfg.fc_encap_type = nla_get_u16(nla);
}
- err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg);
+
+ err = ip6_route_info_create(&r_cfg, &rt);
+ if (err)
+ goto cleanup;
+
+ err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
if (err) {
- last_err = err;
- /* If we are trying to remove a route, do not stop the
- * loop when ip6_route_del() fails (because next hop is
- * already gone), we should try to remove all next hops.
- */
- if (add) {
- /* If add fails, we should try to delete all
- * next hops that have been already added.
- */
- add = 0;
- remaining = cfg->fc_mp_len - remaining;
- goto beginning;
- }
+ dst_free(&rt->dst);
+ goto cleanup;
+ }
+
+ rtnh = rtnh_next(rtnh, &remaining);
+ }
+
+ err_nh = NULL;
+ list_for_each_entry(nh, &rt6_nh_list, next) {
+ err = __ip6_ins_rt(nh->rt6_info, &cfg->fc_nlinfo, &nh->mxc);
+		/* nh->rt6_info is used or freed at this point, reset to NULL */
+ nh->rt6_info = NULL;
+ if (err) {
+ if (replace && nhn)
+ ip6_print_replace_route_err(&rt6_nh_list);
+ err_nh = nh;
+ goto add_errout;
}
+
/* Because each route is added like a single route we remove
* these flags after the first nexthop: if there is a collision,
* we have already failed to add the first nexthop:
*/
cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
NLM_F_REPLACE);
+ nhn++;
+ }
+
+ goto cleanup;
+
+add_errout:
+ /* Delete routes that were already added */
+ list_for_each_entry(nh, &rt6_nh_list, next) {
+ if (err_nh == nh)
+ break;
+ ip6_route_del(&nh->r_cfg);
+ }
+
+cleanup:
+ list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
+ if (nh->rt6_info)
+ dst_free(&nh->rt6_info->dst);
+ kfree(nh->mxc.mx);
+ list_del(&nh->next);
+ kfree(nh);
+ }
+
+ return err;
+}
+
+static int ip6_route_multipath_del(struct fib6_config *cfg)
+{
+ struct fib6_config r_cfg;
+ struct rtnexthop *rtnh;
+ int remaining;
+ int attrlen;
+ int err = 1, last_err = 0;
+
+ remaining = cfg->fc_mp_len;
+ rtnh = (struct rtnexthop *)cfg->fc_mp;
+
+ /* Parse a Multipath Entry */
+ while (rtnh_ok(rtnh, remaining)) {
+ memcpy(&r_cfg, cfg, sizeof(*cfg));
+ if (rtnh->rtnh_ifindex)
+ r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
+
+ attrlen = rtnh_attrlen(rtnh);
+ if (attrlen > 0) {
+ struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
+
+ nla = nla_find(attrs, attrlen, RTA_GATEWAY);
+ if (nla) {
+ nla_memcpy(&r_cfg.fc_gateway, nla, 16);
+ r_cfg.fc_flags |= RTF_GATEWAY;
+ }
+ }
+ err = ip6_route_del(&r_cfg);
+ if (err)
+ last_err = err;
+
rtnh = rtnh_next(rtnh, &remaining);
}
return err;
if (cfg.fc_mp)
- return ip6_route_multipath(&cfg, 0);
+ return ip6_route_multipath_del(&cfg);
else
return ip6_route_del(&cfg);
}
return err;
if (cfg.fc_mp)
- return ip6_route_multipath(&cfg, 1);
+ return ip6_route_multipath_add(&cfg);
else
return ip6_route_add(&cfg);
}
rssi_hyst == bss_conf->cqm_rssi_hyst)
return 0;
+ if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER &&
+ !(sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
+ return -EOPNOTSUPP;
+
bss_conf->cqm_rssi_thold = rssi_thold;
bss_conf->cqm_rssi_hyst = rssi_hyst;
struct ieee80211_supported_band *sband;
struct cfg80211_chan_def chandef;
int ret;
+ u32 i;
+ bool have_80mhz;
sband = local->hw.wiphy->bands[cbss->channel->band];
}
}
+ /* Allow VHT if at least one channel on the sband supports 80 MHz */
+ have_80mhz = false;
+ for (i = 0; i < sband->n_channels; i++) {
+ if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED |
+ IEEE80211_CHAN_NO_80MHZ))
+ continue;
+
+ have_80mhz = true;
+ break;
+ }
+
+ if (!have_80mhz)
+ ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+
ifmgd->flags |= ieee80211_determine_chantype(sdata, sband,
cbss->channel,
ht_cap, ht_oper, vht_oper,
/* Filter out rates that the STA does not support */
*mask &= sta->supp_rates[sband->band];
- for (i = 0; i < sizeof(mcs_mask); i++)
+ for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
mcs_mask[i] &= sta->ht_cap.mcs.rx_mask[i];
sta_vht_cap = sta->vht_cap.vht_mcs.rx_mcs_map;
mutex_unlock(&local->chanctx_mtx);
}
+static int iee80211_tdls_have_ht_peers(struct ieee80211_sub_if_data *sdata)
+{
+ struct sta_info *sta;
+ bool result = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
+ if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
+ !test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
+ !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH) ||
+ !sta->sta.ht_cap.ht_supported)
+ continue;
+ result = true;
+ break;
+ }
+ rcu_read_unlock();
+
+ return result;
+}
+
+static void
+iee80211_tdls_recalc_ht_protection(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta)
+{
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ bool tdls_ht;
+ u16 protection = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED |
+ IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT |
+ IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;
+ u16 opmode;
+
+ /* Nothing to do if the BSS connection uses HT */
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
+ return;
+
+ tdls_ht = (sta && sta->sta.ht_cap.ht_supported) ||
+ iee80211_tdls_have_ht_peers(sdata);
+
+ opmode = sdata->vif.bss_conf.ht_operation_mode;
+
+ if (tdls_ht)
+ opmode |= protection;
+ else
+ opmode &= ~protection;
+
+ if (opmode == sdata->vif.bss_conf.ht_operation_mode)
+ return;
+
+ sdata->vif.bss_conf.ht_operation_mode = opmode;
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
+}
+
int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
const u8 *peer, enum nl80211_tdls_operation oper)
{
return -ENOTSUPP;
}
+ /* protect possible bss_conf changes and avoid concurrency in
+ * ieee80211_bss_info_change_notify()
+ */
+ sdata_lock(sdata);
mutex_lock(&local->mtx);
tdls_dbg(sdata, "TDLS oper %d peer %pM\n", oper, peer);
iee80211_tdls_recalc_chanctx(sdata);
- rcu_read_lock();
+ mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, peer);
if (!sta) {
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
ret = -ENOLINK;
break;
}
+ iee80211_tdls_recalc_ht_protection(sdata, sta);
+
set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
WARN_ON_ONCE(is_zero_ether_addr(sdata->u.mgd.tdls_peer) ||
!ether_addr_equal(sdata->u.mgd.tdls_peer, peer));
ieee80211_flush_queues(local, sdata, false);
ret = sta_info_destroy_addr(sdata, peer);
+
+ mutex_lock(&local->sta_mtx);
+ iee80211_tdls_recalc_ht_protection(sdata, NULL);
+ mutex_unlock(&local->sta_mtx);
+
iee80211_tdls_recalc_chanctx(sdata);
break;
default:
&sdata->u.mgd.request_smps_work);
mutex_unlock(&local->mtx);
+ sdata_unlock(sdata);
return ret;
}
struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap;
struct ieee80211_sta_vht_cap own_cap;
u32 cap_info, i;
+ bool have_80mhz;
memset(vht_cap, 0, sizeof(*vht_cap));
if (!vht_cap_ie || !sband->vht_cap.vht_supported)
return;
+ /* Allow VHT if at least one channel on the sband supports 80 MHz */
+ have_80mhz = false;
+ for (i = 0; i < sband->n_channels; i++) {
+ if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED |
+ IEEE80211_CHAN_NO_80MHZ))
+ continue;
+
+ have_80mhz = true;
+ break;
+ }
+
+ if (!have_80mhz)
+ return;
+
/*
* A VHT STA must support 40 MHz, but if we verify that here
* then we break a few things - some APs (e.g. Netgear R6300v2
#define SET_HOST_MASK(family) (family == AF_INET ? 32 : 128)
#ifdef IP_SET_HASH_WITH_NET0
+/* cidr from 0 to SET_HOST_MASK() value and c = cidr + 1 */
#define NLEN(family) (SET_HOST_MASK(family) + 1)
+#define CIDR_POS(c) ((c) - 1)
#else
+/* cidr from 1 to SET_HOST_MASK() value and c = cidr + 1 */
#define NLEN(family) SET_HOST_MASK(family)
+#define CIDR_POS(c) ((c) - 2)
#endif
#else
} else if (h->nets[i].cidr[n] < cidr) {
j = i;
} else if (h->nets[i].cidr[n] == cidr) {
- h->nets[cidr - 1].nets[n]++;
+ h->nets[CIDR_POS(cidr)].nets[n]++;
return;
}
}
h->nets[i].cidr[n] = h->nets[i - 1].cidr[n];
}
h->nets[i].cidr[n] = cidr;
- h->nets[cidr - 1].nets[n] = 1;
+ h->nets[CIDR_POS(cidr)].nets[n] = 1;
}
static void
for (i = 0; i < nets_length; i++) {
if (h->nets[i].cidr[n] != cidr)
continue;
- h->nets[cidr - 1].nets[n]--;
- if (h->nets[cidr - 1].nets[n] > 0)
+ h->nets[CIDR_POS(cidr)].nets[n]--;
+ if (h->nets[CIDR_POS(cidr)].nets[n] > 0)
return;
for (j = i; j < net_end && h->nets[j].cidr[n]; j++)
h->nets[j].cidr[n] = h->nets[j + 1].cidr[n];
#define HOST_MASK 32
#include "ip_set_hash_gen.h"
+static void
+hash_netnet4_init(struct hash_netnet4_elem *e)
+{
+ e->cidr[0] = HOST_MASK;
+ e->cidr[1] = HOST_MASK;
+}
+
static int
hash_netnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
{
const struct hash_netnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_netnet4_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
+ struct hash_netnet4_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip = 0, ip_to = 0, last;
u32 ip2 = 0, ip2_from = 0, ip2_to = 0, last2;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+ hash_netnet4_init(&e);
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
+static void
+hash_netnet6_init(struct hash_netnet6_elem *e)
+{
+ e->cidr[0] = HOST_MASK;
+ e->cidr[1] = HOST_MASK;
+}
+
static int
hash_netnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_netnet6_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
+ struct hash_netnet6_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+ hash_netnet6_init(&e);
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
#define HOST_MASK 32
#include "ip_set_hash_gen.h"
+static void
+hash_netportnet4_init(struct hash_netportnet4_elem *e)
+{
+ e->cidr[0] = HOST_MASK;
+ e->cidr[1] = HOST_MASK;
+}
+
static int
hash_netportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
{
const struct hash_netportnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_netportnet4_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
+ struct hash_netportnet4_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip = 0, ip_to = 0, ip_last, p = 0, port, port_to;
u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+ hash_netportnet4_init(&e);
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
+static void
+hash_netportnet6_init(struct hash_netportnet6_elem *e)
+{
+ e->cidr[0] = HOST_MASK;
+ e->cidr[1] = HOST_MASK;
+}
+
static int
hash_netportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
{
const struct hash_netportnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_netportnet6_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
+ struct hash_netportnet6_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 port, port_to;
bool with_ports = false;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+ hash_netportnet6_init(&e);
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
-static void nf_ct_tmpl_free(struct nf_conn *tmpl)
+void nf_ct_tmpl_free(struct nf_conn *tmpl)
{
nf_ct_ext_destroy(tmpl);
nf_ct_ext_free(tmpl);
kfree(tmpl);
}
+EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
static void
destroy_conntrack(struct nf_conntrack *nfct)
err3:
free_percpu(snet->stats);
err2:
- nf_conntrack_free(ct);
+ nf_ct_tmpl_free(ct);
err1:
return err;
}
static void nfnetlink_rcv(struct sk_buff *skb)
{
struct nlmsghdr *nlh = nlmsg_hdr(skb);
+ u_int16_t res_id;
int msglen;
if (nlh->nlmsg_len < NLMSG_HDRLEN ||
nfgenmsg = nlmsg_data(nlh);
skb_pull(skb, msglen);
- nfnetlink_rcv_batch(skb, nlh, nfgenmsg->res_id);
+ /* Work around old nft using host byte order */
+ if (nfgenmsg->res_id == NFNL_SUBSYS_NFTABLES)
+ res_id = NFNL_SUBSYS_NFTABLES;
+ else
+ res_id = ntohs(nfgenmsg->res_id);
+ nfnetlink_rcv_batch(skb, nlh, res_id);
} else {
netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
}
__be32 **packet_id_ptr)
{
size_t size;
- size_t data_len = 0, cap_len = 0;
+ size_t data_len = 0, cap_len = 0, rem_len = 0;
unsigned int hlen = 0;
struct sk_buff *skb;
struct nlattr *nla;
hlen = min_t(unsigned int, hlen, data_len);
size += sizeof(struct nlattr) + hlen;
cap_len = entskb->len;
+ rem_len = data_len - hlen;
break;
}
size += nla_total_size(seclen);
}
- skb = nfnetlink_alloc_skb(net, size, queue->peer_portid,
+ skb = __netlink_alloc_skb(net->nfnl, size, rem_len, queue->peer_portid,
GFP_ATOMIC);
if (!skb) {
skb_tx_error(entskb);
return 0;
err3:
- nf_conntrack_free(ct);
+ nf_ct_tmpl_free(ct);
err2:
nf_ct_l3proto_module_put(par->family);
err1:
mask = datagram_poll(file, sock, wait);
- spin_lock_bh(&sk->sk_receive_queue.lock);
- if (nlk->rx_ring.pg_vec) {
- if (netlink_has_valid_frame(&nlk->rx_ring))
- mask |= POLLIN | POLLRDNORM;
+ /* We could already have received frames in the normal receive
+ * queue, that will show up as NL_MMAP_STATUS_COPY in the ring,
+ * so if mask contains pollin/etc already, there's no point
+ * walking the ring.
+ */
+ if ((mask & (POLLIN | POLLRDNORM)) != (POLLIN | POLLRDNORM)) {
+ spin_lock_bh(&sk->sk_receive_queue.lock);
+ if (nlk->rx_ring.pg_vec) {
+ if (netlink_has_valid_frame(&nlk->rx_ring))
+ mask |= POLLIN | POLLRDNORM;
+ }
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
}
- spin_unlock_bh(&sk->sk_receive_queue.lock);
spin_lock_bh(&sk->sk_write_queue.lock);
if (nlk->tx_ring.pg_vec) {
}
EXPORT_SYMBOL(netlink_unicast);
-struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
- u32 dst_portid, gfp_t gfp_mask)
+struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size,
+ unsigned int ldiff, u32 dst_portid,
+ gfp_t gfp_mask)
{
#ifdef CONFIG_NETLINK_MMAP
+ unsigned int maxlen, linear_size;
struct sock *sk = NULL;
struct sk_buff *skb;
struct netlink_ring *ring;
struct nl_mmap_hdr *hdr;
- unsigned int maxlen;
sk = netlink_getsockbyportid(ssk, dst_portid);
if (IS_ERR(sk))
if (ring->pg_vec == NULL)
goto out_put;
- if (ring->frame_size - NL_MMAP_HDRLEN < size)
+ /* We need to account the full linear size needed as a ring
+ * slot cannot have non-linear parts.
+ */
+ linear_size = size + ldiff;
+ if (ring->frame_size - NL_MMAP_HDRLEN < linear_size)
goto out_put;
skb = alloc_skb_head(gfp_mask);
/* check again under lock */
maxlen = ring->frame_size - NL_MMAP_HDRLEN;
- if (maxlen < size)
+ if (maxlen < linear_size)
goto out_free;
netlink_forward_ring(ring);
hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
if (hdr == NULL)
goto err2;
+
netlink_ring_setup_skb(skb, sk, ring, hdr);
netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
atomic_inc(&ring->pending);
#endif
return alloc_skb(size, gfp_mask);
}
-EXPORT_SYMBOL_GPL(netlink_alloc_skb);
+EXPORT_SYMBOL_GPL(__netlink_alloc_skb);
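Renaming netlink_alloc_skb() to __netlink_alloc_skb() with the extra ldiff argument suggests existing callers keep the old name through a thin wrapper passing ldiff = 0; a plausible header-side helper (its exact placement is an assumption):

    static inline struct sk_buff *
    netlink_alloc_skb(struct sock *ssk, unsigned int size, u32 dst_portid,
                      gfp_t gfp_mask)
    {
            return __netlink_alloc_skb(ssk, size, 0, dst_portid, gfp_mask);
    }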
int netlink_has_listeners(struct sock *sk, unsigned int group)
{
config OPENVSWITCH
tristate "Open vSwitch"
depends on INET
+ depends on (!NF_CONNTRACK || NF_CONNTRACK)
select LIBCRC32C
select MPLS
select NET_MPLS_GSO
If unsure, say N.
-config OPENVSWITCH_CONNTRACK
- bool "Open vSwitch conntrack action support"
- depends on OPENVSWITCH
- depends on NF_CONNTRACK
- default OPENVSWITCH
- ---help---
- If you say Y here, then Open vSwitch module will be able to pass
- packets through conntrack.
-
- Say N to exclude this support and reduce the binary size.
-
config OPENVSWITCH_GRE
tristate "Open vSwitch GRE tunneling support"
depends on OPENVSWITCH
vport-internal_dev.o \
vport-netdev.o
-openvswitch-$(CONFIG_OPENVSWITCH_CONNTRACK) += conntrack.o
+ifneq ($(CONFIG_NF_CONNTRACK),)
+openvswitch-y += conntrack.o
+endif
obj-$(CONFIG_OPENVSWITCH_VXLAN)+= vport-vxlan.o
obj-$(CONFIG_OPENVSWITCH_GENEVE)+= vport-geneve.o
struct ovs_conntrack_info;
enum ovs_key_attr;
-#if defined(CONFIG_OPENVSWITCH_CONNTRACK)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
void ovs_ct_init(struct net *);
void ovs_ct_exit(struct net *);
bool ovs_ct_verify(struct net *, enum ovs_key_attr attr);
}
static inline void ovs_ct_free_action(const struct nlattr *a) { }
-#endif
+#endif /* CONFIG_NF_CONNTRACK */
#endif /* ovs_conntrack.h */
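Two idioms are at work here. In Kconfig, "depends on (!NF_CONNTRACK || NF_CONNTRACK)" is always satisfiable, but it forces OPENVSWITCH to be m whenever NF_CONNTRACK is m, so a built-in openvswitch can never link against a modular conntrack. In the header, IS_ENABLED(CONFIG_NF_CONNTRACK) is true for both =y and =m, and the usual shape of such a header pairs each declaration with an empty inline stub (a sketch mirroring the hunk above; the stub body is assumed):

    #if IS_ENABLED(CONFIG_NF_CONNTRACK)
    void ovs_ct_init(struct net *net);
    #else
    static inline void ovs_ct_init(struct net *net) { }
    #endif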
} while (0)
/* rcu read lock must be held or the connection spinlock */
-static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
+static struct rds_connection *rds_conn_lookup(struct net *net,
+ struct hlist_head *head,
__be32 laddr, __be32 faddr,
struct rds_transport *trans)
{
hlist_for_each_entry_rcu(conn, head, c_hash_node) {
if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
- conn->c_trans == trans) {
+ conn->c_trans == trans && net == rds_conn_net(conn)) {
ret = conn;
break;
}
if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
goto new_conn;
rcu_read_lock();
- conn = rds_conn_lookup(head, laddr, faddr, trans);
+ conn = rds_conn_lookup(net, head, laddr, faddr, trans);
if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
laddr == faddr && !is_outgoing) {
/* This is a looped back IB connection, and we're
}
}
+ if (trans == NULL) {
+ kmem_cache_free(rds_conn_slab, conn);
+ conn = ERR_PTR(-ENODEV);
+ goto out;
+ }
+
conn->c_trans = trans;
ret = trans->conn_alloc(conn, gfp);
if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
found = NULL;
else
- found = rds_conn_lookup(head, laddr, faddr, trans);
+ found = rds_conn_lookup(net, head, laddr, faddr, trans);
if (found) {
trans->conn_free(conn->c_transport_data);
kmem_cache_free(rds_conn_slab, conn);
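The lookup now also matches on the network namespace, so identical laddr/faddr/transport tuples in different namespaces no longer alias one another. rds_conn_net() is assumed to be a trivial accessor over a per-connection pnet field, roughly:

    /* Sketch of the assumed accessor; the c_net field name is illustrative. */
    static inline struct net *rds_conn_net(struct rds_connection *conn)
    {
            return read_pnet(&conn->c_net);
    }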
{
struct rfkill *rfkill;
- rfkill_global_states[type].cur = blocked;
+ if (type == RFKILL_TYPE_ALL) {
+ int i;
+
+ for (i = 0; i < NUM_RFKILL_TYPES; i++)
+ rfkill_global_states[i].cur = blocked;
+ } else {
+ rfkill_global_states[type].cur = blocked;
+ }
+
list_for_each_entry(rfkill, &rfkill_list, node) {
if (rfkill->type != type && type != RFKILL_TYPE_ALL)
continue;
if (IS_ERR(rt))
continue;
+ if (!dst)
+ dst = &rt->dst;
+
/* Ensure the src address belongs to the output
* interface.
*/
odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
false);
- if (!odev || odev->ifindex != fl4->flowi4_oif)
+ if (!odev || odev->ifindex != fl4->flowi4_oif) {
+ if (&rt->dst != dst)
+ dst_release(&rt->dst);
continue;
+ }
+ if (dst != &rt->dst)
+ dst_release(dst);
dst = &rt->dst;
break;
}
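The invariant after this fix: throughout the loop exactly one route reference is retained via dst (the first usable fallback), and any additional rt acquired for a rejected candidate is dropped before continuing. Reduced to the bare refcount pattern (a sketch; the real loop carries more state):

    if (candidate_rejected) {
            if (&rt->dst != dst)
                    dst_release(&rt->dst); /* drop the extra candidate ref */
            continue;
    }
    if (dst != &rt->dst)
            dst_release(dst); /* replace the fallback with the match */
    dst = &rt->dst;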
.cb = cb,
.idx = idx,
};
- int err;
-
- err = switchdev_port_obj_dump(dev, &dump.obj);
- if (err)
- return err;
+ switchdev_port_obj_dump(dev, &dump.obj);
return dump.idx;
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump);
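rtnetlink treats the return value of an ndo_fdb_dump implementation as the updated iteration index to resume from, not as an errno, so propagating err here could be misread as an index; unconditionally returning dump.idx matches that contract. The assumed calling convention, sketched from the rtnetlink side:

    /* Sketch of the assumed caller; signature per the ndo_fdb_dump op. */
    idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, NULL, idx);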
}
}
+/**
+ * bclink_prepare_wakeup - prepare users for wakeup after congestion
+ * @bcl: broadcast link
+ * @resultq: queue for users which can be woken up
+ *
+ * Move a number of waiting users, as permitted by available space in
+ * the send queue, from the link wait queue to the specified queue
+ * for wakeup.
+ */
+static void bclink_prepare_wakeup(struct tipc_link *bcl, struct sk_buff_head *resultq)
+{
+ int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
+ int imp, lim;
+ struct sk_buff *skb, *tmp;
+
+ skb_queue_walk_safe(&bcl->wakeupq, skb, tmp) {
+ imp = TIPC_SKB_CB(skb)->chain_imp;
+ lim = bcl->window + bcl->backlog[imp].limit;
+ pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
+ if ((pnd[imp] + bcl->backlog[imp].len) >= lim)
+ continue;
+ skb_unlink(skb, &bcl->wakeupq);
+ skb_queue_tail(resultq, skb);
+ }
+}
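A worked example of the admission test: lim = bcl->window + bcl->backlog[imp].limit, and pnd[imp] accumulates the chain sizes of users already considered at that importance level, including the current candidate. With window 50, backlog[imp].limit 100 and backlog[imp].len 120, lim is 150, leaving 30 units of headroom: users are moved to resultq while the cumulative pnd[imp] stays below 30, and once it reaches 30 every remaining user at that level is skipped (pnd[imp] keeps growing even for skipped entries).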
+
/**
* tipc_bclink_wakeup_users - wake up pending users
*
void tipc_bclink_wakeup_users(struct net *net)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_link *bcl = tn->bcl;
+ struct sk_buff_head resultq;
- tipc_sk_rcv(net, &tn->bclink->link.wakeupq);
+ skb_queue_head_init(&resultq);
+ bclink_prepare_wakeup(bcl, &resultq);
+ tipc_sk_rcv(net, &resultq);
}
/**
* settings, user regulatory settings takes precedence.
*/
if (is_an_alpha2(alpha2))
- regulatory_hint_user(user_alpha2, NL80211_USER_REG_HINT_USER);
+ regulatory_hint_user(alpha2, NL80211_USER_REG_HINT_USER);
spin_lock(&reg_requests_lock);
list_splice_tail_init(&tmp_reg_req_list, &reg_requests_list);
int ret;
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
- timer->cntv_ctl |= ARCH_TIMER_CTRL_IT_MASK;
- ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
- timer->irq->irq,
- timer->irq->level);
+ kvm_vgic_set_phys_irq_active(timer->map, true);
+ ret = kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id,
+ timer->map,
+ timer->irq->level);
WARN_ON(ret);
}
cycle_t cval, now;
if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) ||
- !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE))
+ !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE) ||
+ kvm_vgic_get_phys_irq_active(timer->map))
return false;
cval = timer->cntv_cval;
timer_arm(timer, ns);
}
-void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
- const struct kvm_irq_level *irq)
+int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
+ const struct kvm_irq_level *irq)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ struct irq_phys_map *map;
/*
* The vcpu timer irq number cannot be determined in
* vcpu timer irq number when the vcpu is reset.
*/
timer->irq = irq;
+
+ /*
+ * Tell the VGIC that the virtual interrupt is tied to a
+ * physical interrupt. We do that once per VCPU.
+ */
+ map = kvm_vgic_map_phys_irq(vcpu, irq->irq, host_vtimer_irq);
+ if (WARN_ON(IS_ERR(map)))
+ return PTR_ERR(map);
+
+ timer->map = map;
+ return 0;
}
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
timer_disarm(timer);
+ if (timer->map)
+ kvm_vgic_unmap_phys_irq(vcpu, timer->map);
}
void kvm_timer_enable(struct kvm *kvm)
lr_desc.state |= LR_STATE_ACTIVE;
if (val & GICH_LR_EOI)
lr_desc.state |= LR_EOI_INT;
+ if (val & GICH_LR_HW) {
+ lr_desc.state |= LR_HW;
+ lr_desc.hwirq = (val & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT;
+ }
return lr_desc;
}
static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
struct vgic_lr lr_desc)
{
- u32 lr_val = (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) | lr_desc.irq;
+ u32 lr_val;
+
+ lr_val = lr_desc.irq;
if (lr_desc.state & LR_STATE_PENDING)
lr_val |= GICH_LR_PENDING_BIT;
if (lr_desc.state & LR_EOI_INT)
lr_val |= GICH_LR_EOI;
+ if (lr_desc.state & LR_HW) {
+ lr_val |= GICH_LR_HW;
+ lr_val |= (u32)lr_desc.hwirq << GICH_LR_PHYSID_CPUID_SHIFT;
+ }
+
+ if (lr_desc.irq < VGIC_NR_SGIS)
+ lr_val |= (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT);
+
vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;
}
lr_desc.state |= LR_STATE_ACTIVE;
if (val & ICH_LR_EOI)
lr_desc.state |= LR_EOI_INT;
+ if (val & ICH_LR_HW) {
+ lr_desc.state |= LR_HW;
+ lr_desc.hwirq = (val >> ICH_LR_PHYS_ID_SHIFT) & GENMASK(9, 0);
+ }
return lr_desc;
}
* Eventually we want to make this configurable, so we may revisit
* this in the future.
*/
- if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+ switch (vcpu->kvm->arch.vgic.vgic_model) {
+ case KVM_DEV_TYPE_ARM_VGIC_V3:
lr_val |= ICH_LR_GROUP;
- else
- lr_val |= (u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT;
+ break;
+ case KVM_DEV_TYPE_ARM_VGIC_V2:
+ if (lr_desc.irq < VGIC_NR_SGIS)
+ lr_val |= (u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT;
+ break;
+ default:
+ BUG();
+ }
if (lr_desc.state & LR_STATE_PENDING)
lr_val |= ICH_LR_PENDING_BIT;
lr_val |= ICH_LR_ACTIVE_BIT;
if (lr_desc.state & LR_EOI_INT)
lr_val |= ICH_LR_EOI;
+ if (lr_desc.state & LR_HW) {
+ lr_val |= ICH_LR_HW;
+ lr_val |= ((u64)lr_desc.hwirq) << ICH_LR_PHYS_ID_SHIFT;
+ }
vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val;
}
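For concreteness, the v2 encoding for a hardware-backed pending interrupt (numbers illustrative): with virtual IRQ 27 mapped to physical IRQ 27, the list register word is composed as

    /* Illustrative only: a v2 LR for a HW-backed pending PPI. */
    u32 lr_val = 27;                             /* virt_irq */
    lr_val |= GICH_LR_PENDING_BIT;
    lr_val |= GICH_LR_HW;
    lr_val |= 27u << GICH_LR_PHYSID_CPUID_SHIFT; /* phys_irq */

The source-CPU field is only encoded for SGIs (irq < VGIC_NR_SGIS), which is why the two uses of the PHYSID/CPUID bits cannot collide.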
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
* cause the interrupt to become inactive in such a situation.
* Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become
* inactive as long as the external input line is held high.
+ *
+ *
+ * Initialization rules: there are multiple stages to the vgic
+ * initialization, both for the distributor and the CPU interfaces.
+ *
+ * Distributor:
+ *
+ * - kvm_vgic_early_init(): initialization of static data that doesn't
+ * depend on any sizing information or emulation type. No allocation
+ * is allowed there.
+ *
+ * - vgic_init(): allocation and initialization of the generic data
+ * structures that depend on sizing information (number of CPUs,
+ * number of interrupts). Also initializes the vcpu specific data
+ * structures. Can be executed lazily for GICv2.
+ * [to be renamed to kvm_vgic_init??]
+ *
+ * CPU Interface:
+ *
+ * - kvm_vgic_cpu_early_init(): initialization of static data that
+ * doesn't depend on any sizing information or emulation type. No
+ * allocation is allowed there.
*/
#include "vgic.h"
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
+static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
+ int virt_irq);
static const struct vgic_ops *vgic_ops;
static const struct vgic_params *vgic;
static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
{
- return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq);
+ return !vgic_irq_is_queued(vcpu, irq);
}
/**
if (!vgic_irq_is_edge(vcpu, irq))
vlr.state |= LR_EOI_INT;
+ if (vlr.irq >= VGIC_NR_SGIS) {
+ struct irq_phys_map *map;
+ map = vgic_irq_map_search(vcpu, irq);
+
+ /*
+ * If we have a mapping, and the virtual interrupt is
+ * being injected, then we must set the state to
+ * active in the physical world. Otherwise the
+ * physical interrupt will fire and the guest will
+ * exit before processing the virtual interrupt.
+ */
+ if (map) {
+ int ret;
+
+ BUG_ON(!map->active);
+ vlr.hwirq = map->phys_irq;
+ vlr.state |= LR_HW;
+ vlr.state &= ~LR_EOI_INT;
+
+ ret = irq_set_irqchip_state(map->irq,
+ IRQCHIP_STATE_ACTIVE,
+ true);
+ WARN_ON(ret);
+
+ /*
+ * Make sure we're not going to sample this
+ * again, as a HW-backed interrupt cannot be
+ * in the PENDING_ACTIVE stage.
+ */
+ vgic_irq_set_queued(vcpu, irq);
+ }
+ }
+
vgic_set_lr(vcpu, lr_nr, vlr);
vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
}
return level_pending;
}
+/*
+ * Save the physical active state, and reset it to inactive.
+ *
+ * Return 1 if the HW interrupt went from active to inactive, and 0 otherwise.
+ */
+static int vgic_sync_hwirq(struct kvm_vcpu *vcpu, struct vgic_lr vlr)
+{
+ struct irq_phys_map *map;
+ int ret;
+
+ if (!(vlr.state & LR_HW))
+ return 0;
+
+ map = vgic_irq_map_search(vcpu, vlr.irq);
+ BUG_ON(!map || !map->active);
+
+ ret = irq_get_irqchip_state(map->irq,
+ IRQCHIP_STATE_ACTIVE,
+ &map->active);
+
+ WARN_ON(ret);
+
+ if (map->active) {
+ ret = irq_set_irqchip_state(map->irq,
+ IRQCHIP_STATE_ACTIVE,
+ false);
+ WARN_ON(ret);
+ return 0;
+ }
+
+ return 1;
+}
+
/* Sync back the VGIC state after a guest run */
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
elrsr = vgic_get_elrsr(vcpu);
elrsr_ptr = u64_to_bitmask(&elrsr);
- /* Clear mappings for empty LRs */
- for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {
+ /* Deal with HW interrupts, and clear mappings for empty LRs */
+ for (lr = 0; lr < vgic->nr_lr; lr++) {
struct vgic_lr vlr;
- if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
+ if (!test_bit(lr, vgic_cpu->lr_used))
continue;
vlr = vgic_get_lr(vcpu, lr);
+ if (vgic_sync_hwirq(vcpu, vlr)) {
+ /*
+ * So this is a HW interrupt that the guest
+ * EOI-ed. Clean the LR state and allow the
+ * interrupt to be sampled again.
+ */
+ vlr.state = 0;
+ vlr.hwirq = 0;
+ vgic_set_lr(vcpu, lr, vlr);
+ vgic_irq_clear_queued(vcpu, vlr.irq);
+ set_bit(lr, elrsr_ptr);
+ }
+
+ if (!test_bit(lr, elrsr_ptr))
+ continue;
+
+ clear_bit(lr, vgic_cpu->lr_used);
BUG_ON(vlr.irq >= dist->nr_irqs);
vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
}
static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
- unsigned int irq_num, bool level)
+ struct irq_phys_map *map,
+ unsigned int irq_num, bool level)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct kvm_vcpu *vcpu;
int enabled;
bool ret = true, can_inject = true;
+ if (irq_num >= min(kvm->arch.vgic.nr_irqs, 1020))
+ return -EINVAL;
+
spin_lock(&dist->lock);
vcpu = kvm_get_vcpu(kvm, cpuid);
out:
spin_unlock(&dist->lock);
- return ret ? cpuid : -EINVAL;
+ if (ret) {
+ /* kick the specified vcpu */
+ kvm_vcpu_kick(kvm_get_vcpu(kvm, cpuid));
+ }
+
+ return 0;
+}
+
+static int vgic_lazy_init(struct kvm *kvm)
+{
+ int ret = 0;
+
+ if (unlikely(!vgic_initialized(kvm))) {
+ /*
+ * We only provide the automatic initialization of the VGIC
+ * for the legacy case of a GICv2. Any other type must
+ * be explicitly initialized once setup with the respective
+ * KVM device call.
+ */
+ if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
+ return -EBUSY;
+
+ mutex_lock(&kvm->lock);
+ ret = vgic_init(kvm);
+ mutex_unlock(&kvm->lock);
+ }
+
+ return ret;
}
/**
* kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
* @kvm: The VM structure pointer
* @cpuid: The CPU for PPIs
- * @irq_num: The IRQ number that is assigned to the device
+ * @irq_num: The IRQ number that is assigned to the device. This IRQ
+ * must not be mapped to a HW interrupt.
* @level: Edge-triggered: true: to trigger the interrupt
* false: to ignore the call
- * Level-sensitive true: activates an interrupt
- * false: deactivates an interrupt
+ * Level-sensitive true: raise the input signal
+ * false: lower the input signal
*
* The GIC is not concerned with devices being active-LOW or active-HIGH for
* level-sensitive interrupts. You can think of the level parameter as 1
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
bool level)
{
- int ret = 0;
- int vcpu_id;
-
- if (unlikely(!vgic_initialized(kvm))) {
- /*
- * We only provide the automatic initialization of the VGIC
- * for the legacy case of a GICv2. Any other type must
- * be explicitly initialized once setup with the respective
- * KVM device call.
- */
- if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2) {
- ret = -EBUSY;
- goto out;
- }
- mutex_lock(&kvm->lock);
- ret = vgic_init(kvm);
- mutex_unlock(&kvm->lock);
+ struct irq_phys_map *map;
+ int ret;
- if (ret)
- goto out;
- }
+ ret = vgic_lazy_init(kvm);
+ if (ret)
+ return ret;
- if (irq_num >= min(kvm->arch.vgic.nr_irqs, 1020))
+ map = vgic_irq_map_search(kvm_get_vcpu(kvm, cpuid), irq_num);
+ if (map)
return -EINVAL;
- vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
- if (vcpu_id >= 0) {
- /* kick the specified vcpu */
- kvm_vcpu_kick(kvm_get_vcpu(kvm, vcpu_id));
- }
+ return vgic_update_irq_pending(kvm, cpuid, NULL, irq_num, level);
+}
-out:
- return ret;
+/**
+ * kvm_vgic_inject_mapped_irq - Inject a physically mapped IRQ to the vgic
+ * @kvm: The VM structure pointer
+ * @cpuid: The CPU for PPIs
+ * @map: Pointer to an irq_phys_map structure describing the mapping
+ * @level: Edge-triggered: true: to trigger the interrupt
+ * false: to ignore the call
+ * Level-sensitive true: raise the input signal
+ * false: lower the input signal
+ *
+ * The GIC is not concerned with devices being active-LOW or active-HIGH for
+ * level-sensitive interrupts. You can think of the level parameter as 1
+ * being HIGH and 0 being LOW and all devices being active-HIGH.
+ */
+int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
+ struct irq_phys_map *map, bool level)
+{
+ int ret;
+
+ ret = vgic_lazy_init(kvm);
+ if (ret)
+ return ret;
+
+ return vgic_update_irq_pending(kvm, cpuid, map, map->virt_irq, level);
}
static irqreturn_t vgic_maintenance_handler(int irq, void *data)
return IRQ_HANDLED;
}
+static struct list_head *vgic_get_irq_phys_map_list(struct kvm_vcpu *vcpu,
+ int virt_irq)
+{
+ if (virt_irq < VGIC_NR_PRIVATE_IRQS)
+ return &vcpu->arch.vgic_cpu.irq_phys_map_list;
+ else
+ return &vcpu->kvm->arch.vgic.irq_phys_map_list;
+}
+
+/**
+ * kvm_vgic_map_phys_irq - map a virtual IRQ to a physical IRQ
+ * @vcpu: The VCPU pointer
+ * @virt_irq: The virtual irq number
+ * @irq: The Linux IRQ number
+ *
+ * Establish a mapping between a guest visible irq (@virt_irq) and a
+ * Linux irq (@irq). On injection, @virt_irq will be associated with
+ * the physical interrupt represented by @irq. This mapping can be
+ * established multiple times as long as the parameters are the same.
+ *
+ * Returns a valid pointer on success, and an error pointer otherwise.
+ */
+struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
+ int virt_irq, int irq)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
+ struct irq_phys_map *map;
+ struct irq_phys_map_entry *entry;
+ struct irq_desc *desc;
+ struct irq_data *data;
+ int phys_irq;
+
+ desc = irq_to_desc(irq);
+ if (!desc) {
+ kvm_err("%s: no interrupt descriptor\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ data = irq_desc_get_irq_data(desc);
+ while (data->parent_data)
+ data = data->parent_data;
+
+ phys_irq = data->hwirq;
+
+ /* Create a new mapping */
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock(&dist->irq_phys_map_lock);
+
+ /* Try to match an existing mapping */
+ map = vgic_irq_map_search(vcpu, virt_irq);
+ if (map) {
+ /* Make sure this mapping matches */
+ if (map->phys_irq != phys_irq ||
+ map->irq != irq)
+ map = ERR_PTR(-EINVAL);
+
+ /* Found an existing, valid mapping */
+ goto out;
+ }
+
+ map = &entry->map;
+ map->virt_irq = virt_irq;
+ map->phys_irq = phys_irq;
+ map->irq = irq;
+
+ list_add_tail_rcu(&entry->entry, root);
+
+out:
+ spin_unlock(&dist->irq_phys_map_lock);
+ /*
+ * If we've found a hit in the existing list, free the
+ * now-useless entry.
+ */
+ if (IS_ERR(map) || map != &entry->map)
+ kfree(entry);
+ return map;
+}
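The arch timer reset path earlier in this series is the first user of this interface; the general pattern is (a sketch, with virt_ppi and host_irq standing in for the caller's numbers):

    struct irq_phys_map *map;

    map = kvm_vgic_map_phys_irq(vcpu, virt_ppi, host_irq);
    if (IS_ERR(map))
            return PTR_ERR(map);
    /* ... inject via kvm_vgic_inject_mapped_irq(kvm, cpuid, map, level) ... */
    kvm_vgic_unmap_phys_irq(vcpu, map); /* on teardown */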
+
+static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
+ int virt_irq)
+{
+ struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
+ struct irq_phys_map_entry *entry;
+ struct irq_phys_map *map;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(entry, root, entry) {
+ map = &entry->map;
+ if (map->virt_irq == virt_irq) {
+ rcu_read_unlock();
+ return map;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static void vgic_free_phys_irq_map_rcu(struct rcu_head *rcu)
+{
+ struct irq_phys_map_entry *entry;
+
+ entry = container_of(rcu, struct irq_phys_map_entry, rcu);
+ kfree(entry);
+}
+
+/**
+ * kvm_vgic_get_phys_irq_active - Return the active state of a mapped IRQ
+ *
+ * Return the logical active state of a mapped interrupt. This doesn't
+ * necessarily reflect the current HW state.
+ */
+bool kvm_vgic_get_phys_irq_active(struct irq_phys_map *map)
+{
+ BUG_ON(!map);
+ return map->active;
+}
+
+/**
+ * kvm_vgic_set_phys_irq_active - Set the active state of a mapped IRQ
+ *
+ * Set the logical active state of a mapped interrupt. This doesn't
+ * immediately affect the HW state.
+ */
+void kvm_vgic_set_phys_irq_active(struct irq_phys_map *map, bool active)
+{
+ BUG_ON(!map);
+ map->active = active;
+}
+
+/**
+ * kvm_vgic_unmap_phys_irq - Remove a virtual to physical IRQ mapping
+ * @vcpu: The VCPU pointer
+ * @map: The pointer to a mapping obtained through kvm_vgic_map_phys_irq
+ *
+ * Remove an existing mapping between virtual and physical interrupts.
+ */
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ struct irq_phys_map_entry *entry;
+ struct list_head *root;
+
+ if (!map)
+ return -EINVAL;
+
+ root = vgic_get_irq_phys_map_list(vcpu, map->virt_irq);
+
+ spin_lock(&dist->irq_phys_map_lock);
+
+ list_for_each_entry(entry, root, entry) {
+ if (&entry->map == map) {
+ list_del_rcu(&entry->entry);
+ call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
+ break;
+ }
+ }
+
+ spin_unlock(&dist->irq_phys_map_lock);
+
+ return 0;
+}
+
+static void vgic_destroy_irq_phys_map(struct kvm *kvm, struct list_head *root)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ struct irq_phys_map_entry *entry;
+
+ spin_lock(&dist->irq_phys_map_lock);
+
+ list_for_each_entry(entry, root, entry) {
+ list_del_rcu(&entry->entry);
+ call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
+ }
+
+ spin_unlock(&dist->irq_phys_map_lock);
+}
+
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
kfree(vgic_cpu->active_shared);
kfree(vgic_cpu->pend_act_shared);
kfree(vgic_cpu->vgic_irq_lr_map);
+ vgic_destroy_irq_phys_map(vcpu->kvm, &vgic_cpu->irq_phys_map_list);
vgic_cpu->pending_shared = NULL;
vgic_cpu->active_shared = NULL;
vgic_cpu->pend_act_shared = NULL;
return 0;
}
+/**
+ * kvm_vgic_vcpu_early_init - Earliest possible per-vcpu vgic init stage
+ *
+ * No memory allocation should be performed here, only static init.
+ */
+void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu)
+{
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ INIT_LIST_HEAD(&vgic_cpu->irq_phys_map_list);
+}
+
/**
* kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
*
kfree(dist->irq_spi_target);
kfree(dist->irq_pending_on_cpu);
kfree(dist->irq_active_on_cpu);
+ vgic_destroy_irq_phys_map(kvm, &dist->irq_phys_map_list);
dist->irq_sgi_sources = NULL;
dist->irq_spi_cpu = NULL;
dist->irq_spi_target = NULL;
return 0;
}
+/**
+ * kvm_vgic_early_init - Earliest possible vgic initialization stage
+ *
+ * No memory allocation should be performed here, only static init.
+ */
+void kvm_vgic_early_init(struct kvm *kvm)
+{
+ spin_lock_init(&kvm->arch.vgic.lock);
+ spin_lock_init(&kvm->arch.vgic.irq_phys_map_lock);
+ INIT_LIST_HEAD(&kvm->arch.vgic.irq_phys_map_list);
+}
+
int kvm_vgic_create(struct kvm *kvm, u32 type)
{
int i, vcpu_lock_idx = -1, ret;
if (ret)
goto out_unlock;
- spin_lock_init(&kvm->arch.vgic.lock);
kvm->arch.vgic.in_kernel = true;
kvm->arch.vgic.vgic_model = type;
kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
goto out;
r = -EINVAL;
- if (ue->flags)
+ if (ue->flags) {
+ kfree(e);
goto out;
+ }
r = setup_routing_entry(new, e, ue);
- if (r)
+ if (r) {
+ kfree(e);
goto out;
+ }
++ue;
}
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
-static unsigned int halt_poll_ns;
+/* Halt polling only reduces halt latency by 5-7 us; 500 us is enough. */
+static unsigned int halt_poll_ns = 500000;
module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);
+/* By default, double the per-vcpu halt_poll_ns on each grow step. */
+static unsigned int halt_poll_ns_grow = 2;
+module_param(halt_poll_ns_grow, uint, S_IRUGO);
+
+/* By default (shrink factor 0), reset the per-vcpu halt_poll_ns. */
+static unsigned int halt_poll_ns_shrink;
+module_param(halt_poll_ns_shrink, uint, S_IRUGO);
+
/*
* Ordering of locks:
*
vcpu->kvm = kvm;
vcpu->vcpu_id = id;
vcpu->pid = NULL;
+ vcpu->halt_poll_ns = 0;
init_waitqueue_head(&vcpu->wq);
kvm_async_pf_vcpu_init(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
+static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
+{
+ int old, val;
+
+ old = val = vcpu->halt_poll_ns;
+ /* 10us base */
+ if (val == 0 && halt_poll_ns_grow)
+ val = 10000;
+ else
+ val *= halt_poll_ns_grow;
+
+ vcpu->halt_poll_ns = val;
+ trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
+}
+
+static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
+{
+ int old, val;
+
+ old = val = vcpu->halt_poll_ns;
+ if (halt_poll_ns_shrink == 0)
+ val = 0;
+ else
+ val /= halt_poll_ns_shrink;
+
+ vcpu->halt_poll_ns = val;
+ trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
+}
+
static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
{
if (kvm_arch_vcpu_runnable(vcpu)) {
ktime_t start, cur;
DEFINE_WAIT(wait);
bool waited = false;
+ u64 block_ns;
start = cur = ktime_get();
- if (halt_poll_ns) {
- ktime_t stop = ktime_add_ns(ktime_get(), halt_poll_ns);
+ if (vcpu->halt_poll_ns) {
+ ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
do {
/*
cur = ktime_get();
out:
- trace_kvm_vcpu_wakeup(ktime_to_ns(cur) - ktime_to_ns(start), waited);
+ block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
+
+ if (halt_poll_ns) {
+ if (block_ns <= vcpu->halt_poll_ns)
+ ;
+ /* we had a long block, shrink polling */
+ else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
+ shrink_halt_poll_ns(vcpu);
+ /* we had a short halt and our poll time is too small */
+ else if (vcpu->halt_poll_ns < halt_poll_ns &&
+ block_ns < halt_poll_ns)
+ grow_halt_poll_ns(vcpu);
+ }
+
+ trace_kvm_vcpu_wakeup(block_ns, waited);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_block);
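With the defaults above (halt_poll_ns = 500000, grow = 2, shrink = 0), a vcpu's window evolves as follows: the first short block bumps halt_poll_ns from 0 to the 10 us base, successive short blocks double it (10000, 20000, ..., 320000, 640000 ns), and growth stops once the per-vcpu value reaches or exceeds the 500 us target. A single block longer than 500 us resets the window to 0, because a shrink factor of 0 zeroes rather than divides.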