From: Olof Johansson Date: Fri, 19 Apr 2013 18:37:28 +0000 (-0700) Subject: Merge branch 'omap/boards' into late/cleanup X-Git-Tag: firefly_0821_release~3680^2~544^2~10 X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=ff57270de9c42ddb63736bb95912a24ad315e664;hp=06993d6a8a51b90fc78c8c3cb7a81c9b4b88c683;p=firefly-linux-kernel-4.4.55.git Merge branch 'omap/boards' into late/cleanup * omap/boards: ARM: OMAP: board-4430sdp: Provide regulator to pwm-backlight ARM: OMAP: zoom: Use pwm stack for lcd and keyboard backlight ARM: OMAP2+: omap2plus_defconfig: Add support for BMP085 pressure sensor omap2+: Remove useless Makefile line omap2+: Remove useless Makefile line ARM: OMAP: RX-51: add missing regulator supply definitions for lis3lv02d ARM: OMAP1: fix omap_udc registration --- diff --git a/arch/arm/mach-mvebu/Makefile b/arch/arm/mach-mvebu/Makefile index da93bcbc74c1..c3be068f1c96 100644 --- a/arch/arm/mach-mvebu/Makefile +++ b/arch/arm/mach-mvebu/Makefile @@ -5,6 +5,6 @@ AFLAGS_coherency_ll.o := -Wa,-march=armv7-a obj-y += system-controller.o obj-$(CONFIG_MACH_ARMADA_370_XP) += armada-370-xp.o -obj-$(CONFIG_ARCH_MVEBU) += addr-map.o coherency.o coherency_ll.o pmsu.o irq-armada-370-xp.o +obj-$(CONFIG_ARCH_MVEBU) += addr-map.o coherency.o coherency_ll.o pmsu.o obj-$(CONFIG_SMP) += platsmp.o headsmp.o obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o diff --git a/arch/arm/mach-mvebu/armada-370-xp.c b/arch/arm/mach-mvebu/armada-370-xp.c index a5ea616d6d12..433e8c5343b2 100644 --- a/arch/arm/mach-mvebu/armada-370-xp.c +++ b/arch/arm/mach-mvebu/armada-370-xp.c @@ -19,6 +19,8 @@ #include #include #include +#include +#include #include #include #include @@ -54,6 +56,10 @@ void __init armada_370_xp_init_early(void) * to make sure such the allocations won't fail. */ init_dma_coherent_pool_size(SZ_1M); + +#ifdef CONFIG_CACHE_L2X0 + l2x0_of_init(0, ~0UL); +#endif } static void __init armada_370_xp_dt_init(void) @@ -72,8 +78,7 @@ DT_MACHINE_START(ARMADA_XP_DT, "Marvell Armada 370/XP (Device Tree)") .init_machine = armada_370_xp_dt_init, .map_io = armada_370_xp_map_io, .init_early = armada_370_xp_init_early, - .init_irq = armada_370_xp_init_irq, - .handle_irq = armada_370_xp_handle_irq, + .init_irq = irqchip_init, .init_time = armada_370_xp_timer_and_clk_init, .restart = mvebu_restart, .dt_compat = armada_370_xp_dt_compat, diff --git a/arch/arm/mach-mvebu/irq-armada-370-xp.c b/arch/arm/mach-mvebu/irq-armada-370-xp.c deleted file mode 100644 index 6a9195e10579..000000000000 --- a/arch/arm/mach-mvebu/irq-armada-370-xp.c +++ /dev/null @@ -1,298 +0,0 @@ -/* - * Marvell Armada 370 and Armada XP SoC IRQ handling - * - * Copyright (C) 2012 Marvell - * - * Lior Amsalem - * Gregory CLEMENT - * Thomas Petazzoni - * Ben Dooks - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* Interrupt Controller Registers Map */ -#define ARMADA_370_XP_INT_SET_MASK_OFFS (0x48) -#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS (0x4C) - -#define ARMADA_370_XP_INT_CONTROL (0x00) -#define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30) -#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34) -#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4) - -#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44) - -#define ARMADA_370_XP_SW_TRIG_INT_OFFS (0x4) -#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS (0xc) -#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS (0x8) - -#define ARMADA_370_XP_MAX_PER_CPU_IRQS (28) - -#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ (5) - -#define ACTIVE_DOORBELLS (8) - -static DEFINE_RAW_SPINLOCK(irq_controller_lock); - -static void __iomem *per_cpu_int_base; -static void __iomem *main_int_base; -static struct irq_domain *armada_370_xp_mpic_domain; - -/* - * In SMP mode: - * For shared global interrupts, mask/unmask global enable bit - * For CPU interrtups, mask/unmask the calling CPU's bit - */ -static void armada_370_xp_irq_mask(struct irq_data *d) -{ -#ifdef CONFIG_SMP - irq_hw_number_t hwirq = irqd_to_hwirq(d); - - if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) - writel(hwirq, main_int_base + - ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS); - else - writel(hwirq, per_cpu_int_base + - ARMADA_370_XP_INT_SET_MASK_OFFS); -#else - writel(irqd_to_hwirq(d), - per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS); -#endif -} - -static void armada_370_xp_irq_unmask(struct irq_data *d) -{ -#ifdef CONFIG_SMP - irq_hw_number_t hwirq = irqd_to_hwirq(d); - - if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) - writel(hwirq, main_int_base + - ARMADA_370_XP_INT_SET_ENABLE_OFFS); - else - writel(hwirq, per_cpu_int_base + - ARMADA_370_XP_INT_CLEAR_MASK_OFFS); -#else - writel(irqd_to_hwirq(d), - per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); -#endif -} - -#ifdef CONFIG_SMP -static int armada_xp_set_affinity(struct irq_data *d, - const struct cpumask *mask_val, bool force) -{ - unsigned long reg; - unsigned long new_mask = 0; - unsigned long online_mask = 0; - unsigned long count = 0; - irq_hw_number_t hwirq = irqd_to_hwirq(d); - int cpu; - - for_each_cpu(cpu, mask_val) { - new_mask |= 1 << cpu_logical_map(cpu); - count++; - } - - /* - * Forbid mutlicore interrupt affinity - * This is required since the MPIC HW doesn't limit - * several CPUs from acknowledging the same interrupt. 
- */ - if (count > 1) - return -EINVAL; - - for_each_cpu(cpu, cpu_online_mask) - online_mask |= 1 << cpu_logical_map(cpu); - - raw_spin_lock(&irq_controller_lock); - - reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); - reg = (reg & (~online_mask)) | new_mask; - writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); - - raw_spin_unlock(&irq_controller_lock); - - return 0; -} -#endif - -static struct irq_chip armada_370_xp_irq_chip = { - .name = "armada_370_xp_irq", - .irq_mask = armada_370_xp_irq_mask, - .irq_mask_ack = armada_370_xp_irq_mask, - .irq_unmask = armada_370_xp_irq_unmask, -#ifdef CONFIG_SMP - .irq_set_affinity = armada_xp_set_affinity, -#endif -}; - -static int armada_370_xp_mpic_irq_map(struct irq_domain *h, - unsigned int virq, irq_hw_number_t hw) -{ - armada_370_xp_irq_mask(irq_get_irq_data(virq)); - writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); - irq_set_status_flags(virq, IRQ_LEVEL); - - if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) { - irq_set_percpu_devid(virq); - irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, - handle_percpu_devid_irq); - - } else { - irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, - handle_level_irq); - } - set_irq_flags(virq, IRQF_VALID | IRQF_PROBE); - - return 0; -} - -#ifdef CONFIG_SMP -void armada_mpic_send_doorbell(const struct cpumask *mask, unsigned int irq) -{ - int cpu; - unsigned long map = 0; - - /* Convert our logical CPU mask into a physical one. */ - for_each_cpu(cpu, mask) - map |= 1 << cpu_logical_map(cpu); - - /* - * Ensure that stores to Normal memory are visible to the - * other CPUs before issuing the IPI. - */ - dsb(); - - /* submit softirq */ - writel((map << 8) | irq, main_int_base + - ARMADA_370_XP_SW_TRIG_INT_OFFS); -} - -void armada_xp_mpic_smp_cpu_init(void) -{ - /* Clear pending IPIs */ - writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); - - /* Enable first 8 IPIs */ - writel((1 << ACTIVE_DOORBELLS) - 1, per_cpu_int_base + - ARMADA_370_XP_IN_DRBEL_MSK_OFFS); - - /* Unmask IPI interrupt */ - writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); -} -#endif /* CONFIG_SMP */ - -static struct irq_domain_ops armada_370_xp_mpic_irq_ops = { - .map = armada_370_xp_mpic_irq_map, - .xlate = irq_domain_xlate_onecell, -}; - -static int __init armada_370_xp_mpic_of_init(struct device_node *node, - struct device_node *parent) -{ - u32 control; - - main_int_base = of_iomap(node, 0); - per_cpu_int_base = of_iomap(node, 1); - - BUG_ON(!main_int_base); - BUG_ON(!per_cpu_int_base); - - control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL); - - armada_370_xp_mpic_domain = - irq_domain_add_linear(node, (control >> 2) & 0x3ff, - &armada_370_xp_mpic_irq_ops, NULL); - - if (!armada_370_xp_mpic_domain) - panic("Unable to add Armada_370_Xp MPIC irq domain (DT)\n"); - - irq_set_default_host(armada_370_xp_mpic_domain); - -#ifdef CONFIG_SMP - armada_xp_mpic_smp_cpu_init(); - - /* - * Set the default affinity from all CPUs to the boot cpu. - * This is required since the MPIC doesn't limit several CPUs - * from acknowledging the same interrupt. 
- */ - cpumask_clear(irq_default_affinity); - cpumask_set_cpu(smp_processor_id(), irq_default_affinity); - -#endif - - return 0; -} - -asmlinkage void __exception_irq_entry armada_370_xp_handle_irq(struct pt_regs - *regs) -{ - u32 irqstat, irqnr; - - do { - irqstat = readl_relaxed(per_cpu_int_base + - ARMADA_370_XP_CPU_INTACK_OFFS); - irqnr = irqstat & 0x3FF; - - if (irqnr > 1022) - break; - - if (irqnr > 0) { - irqnr = irq_find_mapping(armada_370_xp_mpic_domain, - irqnr); - handle_IRQ(irqnr, regs); - continue; - } -#ifdef CONFIG_SMP - /* IPI Handling */ - if (irqnr == 0) { - u32 ipimask, ipinr; - - ipimask = readl_relaxed(per_cpu_int_base + - ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS) - & 0xFF; - - writel(0x0, per_cpu_int_base + - ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); - - /* Handle all pending doorbells */ - for (ipinr = 0; ipinr < ACTIVE_DOORBELLS; ipinr++) { - if (ipimask & (0x1 << ipinr)) - handle_IPI(ipinr, regs); - } - continue; - } -#endif - - } while (1); -} - -static const struct of_device_id mpic_of_match[] __initconst = { - {.compatible = "marvell,mpic", .data = armada_370_xp_mpic_of_init}, - {}, -}; - -void __init armada_370_xp_init_irq(void) -{ - of_irq_init(mpic_of_match); -#ifdef CONFIG_CACHE_L2X0 - l2x0_of_init(0, ~0UL); -#endif -} diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig index 903da8eb886c..cdd05f2e67ee 100644 --- a/arch/arm/mach-omap1/Kconfig +++ b/arch/arm/mach-omap1/Kconfig @@ -55,12 +55,6 @@ config MACH_OMAP_H3 TI OMAP 1710 H3 board support. Say Y here if you have such a board. -config MACH_OMAP_HTCWIZARD - bool "HTC Wizard" - depends on ARCH_OMAP850 - help - HTC Wizard smartphone support (AKA QTEK 9100, ...) - config MACH_HERALD bool "HTC Herald" depends on ARCH_OMAP850 diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 8111cd9ff3e5..b9c0ed3f648c 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -408,7 +408,7 @@ config OMAP3_SDRC_AC_TIMING config OMAP4_ERRATA_I688 bool "OMAP4 errata: Async Bridge Corruption" - depends on ARCH_OMAP4 && !ARCH_MULTIPLATFORM + depends on (ARCH_OMAP4 || SOC_OMAP5) && !ARCH_MULTIPLATFORM select ARCH_HAS_BARRIERS help If a data is stalled inside asynchronous bridge because of back diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c index a3e0aaa4886b..cb0596b631cf 100644 --- a/arch/arm/mach-omap2/board-2430sdp.c +++ b/arch/arm/mach-omap2/board-2430sdp.c @@ -166,7 +166,7 @@ static void __init sdp2430_display_init(void) omap_display_init(&sdp2430_dss_data); } -#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91x_MODULE) +#if IS_ENABLED(CONFIG_SMC91X) static struct omap_smc91x_platform_data board_smc91x_data = { .cs = 5, diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c index 812c829fa46f..5b4ec51c385f 100644 --- a/arch/arm/mach-omap2/board-h4.c +++ b/arch/arm/mach-omap2/board-h4.c @@ -246,7 +246,7 @@ static u32 is_gpmc_muxed(void) return 0; } -#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91x_MODULE) +#if IS_ENABLED(CONFIG_SMC91X) static struct omap_smc91x_platform_data board_smc91x_data = { .cs = 1, diff --git a/arch/arm/mach-omap2/cclock33xx_data.c b/arch/arm/mach-omap2/cclock33xx_data.c index 476b82066cb6..7f091c85384e 100644 --- a/arch/arm/mach-omap2/cclock33xx_data.c +++ b/arch/arm/mach-omap2/cclock33xx_data.c @@ -958,6 +958,14 @@ int __init am33xx_clk_init(void) clk_set_parent(&timer3_fck, &sys_clkin_ck); clk_set_parent(&timer6_fck, &sys_clkin_ck); + /* + * The On-Chip 32K RC Osc clock is not an 
accurate clock-source as per + * the design/spec, so as a result, for example, timer which supposed + * to get expired @60Sec, but will expire somewhere ~@40Sec, which is + * not expected by any use-case, so change WDT1 clock source to PRCM + * 32KHz clock. + */ + clk_set_parent(&wdt1_fck, &clkdiv32k_ick); return 0; } diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c index 3aed4b0b9563..3a0296cfcace 100644 --- a/arch/arm/mach-omap2/dpll3xxx.c +++ b/arch/arm/mach-omap2/dpll3xxx.c @@ -307,10 +307,10 @@ static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel) _omap3_noncore_dpll_bypass(clk); /* - * Set jitter correction. No jitter correction for OMAP4 and 3630 - * since freqsel field is no longer present + * Set jitter correction. Jitter correction applicable for OMAP343X + * only since freqsel field is no longer present on other devices. */ - if (!soc_is_am33xx() && !cpu_is_omap44xx() && !cpu_is_omap3630()) { + if (cpu_is_omap343x()) { v = __raw_readl(dd->control_reg); v &= ~dd->freqsel_mask; v |= freqsel << __ffs(dd->freqsel_mask); @@ -480,29 +480,30 @@ int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate, if (!dd) return -EINVAL; - __clk_prepare(dd->clk_bypass); - clk_enable(dd->clk_bypass); - __clk_prepare(dd->clk_ref); - clk_enable(dd->clk_ref); - if (__clk_get_rate(dd->clk_bypass) == rate && (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) { pr_debug("%s: %s: set rate: entering bypass.\n", __func__, __clk_get_name(hw->clk)); + __clk_prepare(dd->clk_bypass); + clk_enable(dd->clk_bypass); ret = _omap3_noncore_dpll_bypass(clk); if (!ret) new_parent = dd->clk_bypass; + clk_disable(dd->clk_bypass); + __clk_unprepare(dd->clk_bypass); } else { + __clk_prepare(dd->clk_ref); + clk_enable(dd->clk_ref); + if (dd->last_rounded_rate != rate) rate = __clk_round_rate(hw->clk, rate); if (dd->last_rounded_rate == 0) return -EINVAL; - /* No freqsel on AM335x, OMAP4 and OMAP3630 */ - if (!soc_is_am33xx() && !cpu_is_omap44xx() && - !cpu_is_omap3630()) { + /* Freqsel is available only on OMAP343X devices */ + if (cpu_is_omap343x()) { freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n); WARN_ON(!freqsel); @@ -514,6 +515,8 @@ int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate, ret = omap3_noncore_dpll_program(clk, freqsel); if (!ret) new_parent = dd->clk_ref; + clk_disable(dd->clk_ref); + __clk_unprepare(dd->clk_ref); } /* * FIXME - this is all wrong. 
common code handles reparenting and @@ -525,11 +528,6 @@ int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate, if (!ret) __clk_reparent(hw->clk, new_parent); - clk_disable(dd->clk_ref); - __clk_unprepare(dd->clk_ref); - clk_disable(dd->clk_bypass); - __clk_unprepare(dd->clk_bypass); - return 0; } diff --git a/arch/arm/mach-omap2/dsp.c b/arch/arm/mach-omap2/dsp.c index b155500e84a8..b8208b4b1bd9 100644 --- a/arch/arm/mach-omap2/dsp.c +++ b/arch/arm/mach-omap2/dsp.c @@ -26,7 +26,7 @@ #include "control.h" #include "cm2xxx_3xxx.h" #include "prm2xxx_3xxx.h" -#ifdef CONFIG_BRIDGE_DVFS +#ifdef CONFIG_TIDSPBRIDGE_DVFS #include "omap-pm.h" #endif @@ -35,7 +35,7 @@ static struct platform_device *omap_dsp_pdev; static struct omap_dsp_platform_data omap_dsp_pdata __initdata = { -#ifdef CONFIG_BRIDGE_DVFS +#ifdef CONFIG_TIDSPBRIDGE_DVFS .dsp_set_min_opp = omap_pm_dsp_set_min_opp, .dsp_get_opp = omap_pm_dsp_get_opp, .cpu_set_freq = omap_pm_cpu_set_freq, diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c index 8a68f1ec66b9..ff0bc9e51aa7 100644 --- a/arch/arm/mach-omap2/id.c +++ b/arch/arm/mach-omap2/id.c @@ -529,22 +529,28 @@ void __init omap5xxx_check_revision(void) case 0xb942: switch (rev) { case 0: - default: omap_revision = OMAP5430_REV_ES1_0; + break; + case 1: + default: + omap_revision = OMAP5430_REV_ES2_0; } break; case 0xb998: switch (rev) { case 0: - default: omap_revision = OMAP5432_REV_ES1_0; + break; + case 1: + default: + omap_revision = OMAP5432_REV_ES2_0; } break; default: /* Unknown default to latest silicon rev as default*/ - omap_revision = OMAP5430_REV_ES1_0; + omap_revision = OMAP5430_REV_ES2_0; } pr_info("OMAP%04x ES%d.0\n", diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index 5c445ca1e271..e210fa830f8d 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c @@ -277,6 +277,14 @@ static struct map_desc omap54xx_io_desc[] __initdata = { .length = L4_PER_54XX_SIZE, .type = MT_DEVICE, }, +#ifdef CONFIG_OMAP4_ERRATA_I688 + { + .virtual = OMAP4_SRAM_VA, + .pfn = __phys_to_pfn(OMAP4_SRAM_PA), + .length = PAGE_SIZE, + .type = MT_MEMORY_SO, + }, +#endif }; #endif @@ -329,6 +337,7 @@ void __init omap4_map_io(void) void __init omap5_map_io(void) { iotable_init(omap54xx_io_desc, ARRAY_SIZE(omap54xx_io_desc)); + omap_barriers_init(); } #endif /* diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c index 708bb115a27f..2aeb928efdfd 100644 --- a/arch/arm/mach-omap2/omap4-common.c +++ b/arch/arm/mach-omap2/omap4-common.c @@ -240,15 +240,21 @@ void __iomem *omap4_get_sar_ram_base(void) */ static int __init omap4_sar_ram_init(void) { + unsigned long sar_base; + /* * To avoid code running on other OMAPs in * multi-omap builds */ - if (!cpu_is_omap44xx()) + if (cpu_is_omap44xx()) + sar_base = OMAP44XX_SAR_RAM_BASE; + else if (soc_is_omap54xx()) + sar_base = OMAP54XX_SAR_RAM_BASE; + else return -ENOMEM; /* Static mapping, never released */ - sar_ram_base = ioremap(OMAP44XX_SAR_RAM_BASE, SZ_16K); + sar_ram_base = ioremap(sar_base, SZ_16K); if (WARN_ON(!sar_ram_base)) return -ENOMEM; diff --git a/arch/arm/mach-omap2/omap4-sar-layout.h b/arch/arm/mach-omap2/omap4-sar-layout.h index e170fe803b04..937417523b8e 100644 --- a/arch/arm/mach-omap2/omap4-sar-layout.h +++ b/arch/arm/mach-omap2/omap4-sar-layout.h @@ -48,13 +48,13 @@ #define SAR_BACKUP_STATUS_WAKEUPGEN 0x10 /* WakeUpGen save restore offset from OMAP54XX_SAR_RAM_BASE */ -#define OMAP5_WAKEUPGENENB_OFFSET_CPU0 (SAR_BANK3_OFFSET + 0x8d4) -#define 
OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU0 (SAR_BANK3_OFFSET + 0x8e8) -#define OMAP5_WAKEUPGENENB_OFFSET_CPU1 (SAR_BANK3_OFFSET + 0x8fc) -#define OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU1 (SAR_BANK3_OFFSET + 0x910) -#define OMAP5_AUXCOREBOOT0_OFFSET (SAR_BANK3_OFFSET + 0x924) -#define OMAP5_AUXCOREBOOT1_OFFSET (SAR_BANK3_OFFSET + 0x928) -#define OMAP5_AMBA_IF_MODE_OFFSET (SAR_BANK3_OFFSET + 0x92c) +#define OMAP5_WAKEUPGENENB_OFFSET_CPU0 (SAR_BANK3_OFFSET + 0x9dc) +#define OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU0 (SAR_BANK3_OFFSET + 0x9f0) +#define OMAP5_WAKEUPGENENB_OFFSET_CPU1 (SAR_BANK3_OFFSET + 0xa04) +#define OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU1 (SAR_BANK3_OFFSET + 0xa18) +#define OMAP5_AUXCOREBOOT0_OFFSET (SAR_BANK3_OFFSET + 0xa2c) +#define OMAP5_AUXCOREBOOT1_OFFSET (SAR_BANK3_OFFSET + 0x930) +#define OMAP5_AMBA_IF_MODE_OFFSET (SAR_BANK3_OFFSET + 0xa34) #define OMAP5_SAR_BACKUP_STATUS_OFFSET (SAR_BANK3_OFFSET + 0x800) #endif diff --git a/arch/arm/mach-omap2/omap54xx.h b/arch/arm/mach-omap2/omap54xx.h index a2582bb3cab3..a086ba15868b 100644 --- a/arch/arm/mach-omap2/omap54xx.h +++ b/arch/arm/mach-omap2/omap54xx.h @@ -28,5 +28,6 @@ #define OMAP54XX_PRCM_MPU_BASE 0x48243000 #define OMAP54XX_SCM_BASE 0x4a002000 #define OMAP54XX_CTRL_BASE 0x4a002800 +#define OMAP54XX_SAR_RAM_BASE 0x4ae26000 #endif /* __ASM_SOC_OMAP555554XX_H */ diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index a202a4785104..5f33c2da6999 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -610,8 +610,6 @@ static int _enable_wakeup(struct omap_hwmod *oh, u32 *v) /* XXX test pwrdm_get_wken for this hwmod's subsystem */ - oh->_int_flags |= _HWMOD_WAKEUP_ENABLED; - return 0; } @@ -645,8 +643,6 @@ static int _disable_wakeup(struct omap_hwmod *oh, u32 *v) /* XXX test pwrdm_get_wken for this hwmod's subsystem */ - oh->_int_flags &= ~_HWMOD_WAKEUP_ENABLED; - return 0; } diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h index d5dc935f6060..fe5962921f07 100644 --- a/arch/arm/mach-omap2/omap_hwmod.h +++ b/arch/arm/mach-omap2/omap_hwmod.h @@ -482,15 +482,13 @@ struct omap_hwmod_omap4_prcm { * These are for internal use only and are managed by the omap_hwmod code. 
* * _HWMOD_NO_MPU_PORT: no path exists for the MPU to write to this module - * _HWMOD_WAKEUP_ENABLED: set when the omap_hwmod code has enabled ENAWAKEUP * _HWMOD_SYSCONFIG_LOADED: set when the OCP_SYSCONFIG value has been cached * _HWMOD_SKIP_ENABLE: set if hwmod enabled during init (HWMOD_INIT_NO_IDLE) - * causes the first call to _enable() to only update the pinmux */ #define _HWMOD_NO_MPU_PORT (1 << 0) -#define _HWMOD_WAKEUP_ENABLED (1 << 1) -#define _HWMOD_SYSCONFIG_LOADED (1 << 2) -#define _HWMOD_SKIP_ENABLE (1 << 3) +#define _HWMOD_SYSCONFIG_LOADED (1 << 1) +#define _HWMOD_SKIP_ENABLE (1 << 2) /* * omap_hwmod._state definitions diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c index 26eee4a556ad..31bea1ce3de1 100644 --- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c @@ -28,6 +28,7 @@ #include "prm-regbits-33xx.h" #include "i2c.h" #include "mmc.h" +#include "wd_timer.h" /* * IP blocks @@ -2087,8 +2088,21 @@ static struct omap_hwmod am33xx_uart6_hwmod = { }; /* 'wd_timer' class */ +static struct omap_hwmod_class_sysconfig wdt_sysc = { + .rev_offs = 0x0, + .sysc_offs = 0x10, + .syss_offs = 0x14, + .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_SIDLEMODE | + SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | + SIDLE_SMART_WKUP), + .sysc_fields = &omap_hwmod_sysc_type1, +}; + static struct omap_hwmod_class am33xx_wd_timer_hwmod_class = { .name = "wd_timer", + .sysc = &wdt_sysc, + .pre_shutdown = &omap2_wd_timer_disable, }; /* @@ -2099,6 +2113,7 @@ static struct omap_hwmod am33xx_wd_timer1_hwmod = { .name = "wd_timer2", .class = &am33xx_wd_timer_hwmod_class, .clkdm_name = "l4_wkup_clkdm", + .flags = HWMOD_SWSUP_SIDLE, .main_clk = "wdt1_fck", .prcm = { .omap4 = { diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c index 8e61d80bf6b3..89cad4a605dd 100644 --- a/arch/arm/mach-omap2/powerdomain.c +++ b/arch/arm/mach-omap2/powerdomain.c @@ -52,7 +52,6 @@ enum { #define ALREADYACTIVE_SWITCH 0 #define FORCEWAKEUP_SWITCH 1 #define LOWPOWERSTATE_SWITCH 2 -#define ERROR_SWITCH 3 /* pwrdm_list contains all registered struct powerdomains */ static LIST_HEAD(pwrdm_list); @@ -233,10 +232,7 @@ static u8 _pwrdm_save_clkdm_state_and_activate(struct powerdomain *pwrdm, { u8 sleep_switch; - if (curr_pwrst < 0) { - WARN_ON(1); - sleep_switch = ERROR_SWITCH; - } else if (curr_pwrst < PWRDM_POWER_ON) { + if (curr_pwrst < PWRDM_POWER_ON) { if (curr_pwrst > pwrst && pwrdm->flags & PWRDM_HAS_LOWPOWERSTATECHANGE && arch_pwrdm->pwrdm_set_lowpwrstchange) { @@ -1091,7 +1087,8 @@ int pwrdm_post_transition(struct powerdomain *pwrdm) */ int omap_set_pwrdm_state(struct powerdomain *pwrdm, u8 pwrst) { - u8 curr_pwrst, next_pwrst, sleep_switch; + u8 next_pwrst, sleep_switch; + int curr_pwrst; int ret = 0; bool hwsup = false; @@ -1107,16 +1104,17 @@ int omap_set_pwrdm_state(struct powerdomain *pwrdm, u8 pwrst) pwrdm_lock(pwrdm); curr_pwrst = pwrdm_read_pwrst(pwrdm); + if (curr_pwrst < 0) { + ret = -EINVAL; + goto osps_out; + } + next_pwrst = pwrdm_read_next_pwrst(pwrdm); if (curr_pwrst == pwrst && next_pwrst == pwrst) goto osps_out; sleep_switch = _pwrdm_save_clkdm_state_and_activate(pwrdm, curr_pwrst, pwrst, &hwsup); - if (sleep_switch == ERROR_SWITCH) { - ret = -EINVAL; - goto osps_out; - } ret = pwrdm_set_next_pwrst(pwrdm, pwrst); if (ret) diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c index d35f98aabf7a..415c7e0c9393 100644 
--- a/arch/arm/mach-omap2/prm44xx.c +++ b/arch/arm/mach-omap2/prm44xx.c @@ -81,13 +81,13 @@ static struct prm_reset_src_map omap44xx_prm_reset_src_map[] = { /* Read a register in a CM/PRM instance in the PRM module */ u32 omap4_prm_read_inst_reg(s16 inst, u16 reg) { - return __raw_readl(OMAP44XX_PRM_REGADDR(inst, reg)); + return __raw_readl(prm_base + inst + reg); } /* Write into a register in a CM/PRM instance in the PRM module */ void omap4_prm_write_inst_reg(u32 val, s16 inst, u16 reg) { - __raw_writel(val, OMAP44XX_PRM_REGADDR(inst, reg)); + __raw_writel(val, prm_base + inst + reg); } /* Read-modify-write a register in a PRM module. Caller must lock */ @@ -650,7 +650,7 @@ static struct prm_ll_data omap44xx_prm_ll_data = { int __init omap44xx_prm_init(void) { - if (!cpu_is_omap44xx()) + if (!cpu_is_omap44xx() && !soc_is_omap54xx()) return 0; return prm_register(&omap44xx_prm_ll_data); diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h index c62116bbc760..18fdeeb3a44a 100644 --- a/arch/arm/mach-omap2/soc.h +++ b/arch/arm/mach-omap2/soc.h @@ -413,7 +413,9 @@ IS_OMAP_TYPE(3430, 0x3430) #define OMAP54XX_CLASS 0x54000054 #define OMAP5430_REV_ES1_0 (OMAP54XX_CLASS | (0x30 << 16) | (0x10 << 8)) +#define OMAP5430_REV_ES2_0 (OMAP54XX_CLASS | (0x30 << 16) | (0x20 << 8)) #define OMAP5432_REV_ES1_0 (OMAP54XX_CLASS | (0x32 << 16) | (0x10 << 8)) +#define OMAP5432_REV_ES2_0 (OMAP54XX_CLASS | (0x32 << 16) | (0x20 << 8)) void omap2xxx_check_revision(void); void omap3xxx_check_revision(void); diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index f62b509ed08d..d00d89c93f1c 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c @@ -62,6 +62,7 @@ #define OMAP2_MPU_SOURCE "sys_ck" #define OMAP3_MPU_SOURCE OMAP2_MPU_SOURCE #define OMAP4_MPU_SOURCE "sys_clkin_ck" +#define OMAP5_MPU_SOURCE "sys_clkin" #define OMAP2_32K_SOURCE "func_32k_ck" #define OMAP3_32K_SOURCE "omap_32k_fck" #define OMAP4_32K_SOURCE "sys_32k_ck" @@ -487,7 +488,7 @@ static void __init realtime_counter_init(void) pr_err("%s: ioremap failed\n", __func__); return; } - sys_clk = clk_get(NULL, "sys_clkin_ck"); + sys_clk = clk_get(NULL, OMAP5_MPU_SOURCE); if (IS_ERR(sys_clk)) { pr_err("%s: failed to get system clock handle\n", __func__); iounmap(base); @@ -620,7 +621,7 @@ void __init omap4_local_timer_init(void) #ifdef CONFIG_SOC_OMAP5 OMAP_SYS_32K_TIMER_INIT(5, 1, OMAP4_32K_SOURCE, "ti,timer-alwon", - 2, OMAP4_MPU_SOURCE); + 2, OMAP5_MPU_SOURCE); void __init omap5_realtime_timer_init(void) { int err; diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 98e3b87bdf1b..dae27a77c1e1 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -2,6 +2,7 @@ obj-$(CONFIG_IRQCHIP) += irqchip.o obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o +obj-$(CONFIG_ARCH_MVEBU) += irq-armada-370-xp.o obj-$(CONFIG_METAG) += irq-metag-ext.o obj-$(CONFIG_METAG_PERFCOUNTER_IRQS) += irq-metag.o obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi.o diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c new file mode 100644 index 000000000000..ad1e6422a732 --- /dev/null +++ b/drivers/irqchip/irq-armada-370-xp.c @@ -0,0 +1,294 @@ +/* + * Marvell Armada 370 and Armada XP SoC IRQ handling + * + * Copyright (C) 2012 Marvell + * + * Lior Amsalem + * Gregory CLEMENT + * Thomas Petazzoni + * Ben Dooks + * + * This file is licensed under the terms of the GNU General Public + * License version 2. 
This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "irqchip.h" + +/* Interrupt Controller Registers Map */ +#define ARMADA_370_XP_INT_SET_MASK_OFFS (0x48) +#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS (0x4C) + +#define ARMADA_370_XP_INT_CONTROL (0x00) +#define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30) +#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34) +#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4) + +#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44) + +#define ARMADA_370_XP_SW_TRIG_INT_OFFS (0x4) +#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS (0xc) +#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS (0x8) + +#define ARMADA_370_XP_MAX_PER_CPU_IRQS (28) + +#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ (5) + +#define IPI_DOORBELL_START (0) +#define IPI_DOORBELL_END (8) +#define IPI_DOORBELL_MASK 0xFF + +static DEFINE_RAW_SPINLOCK(irq_controller_lock); + +static void __iomem *per_cpu_int_base; +static void __iomem *main_int_base; +static struct irq_domain *armada_370_xp_mpic_domain; + +/* + * In SMP mode: + * For shared global interrupts, mask/unmask global enable bit + * For CPU interrtups, mask/unmask the calling CPU's bit + */ +static void armada_370_xp_irq_mask(struct irq_data *d) +{ +#ifdef CONFIG_SMP + irq_hw_number_t hwirq = irqd_to_hwirq(d); + + if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) + writel(hwirq, main_int_base + + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS); + else + writel(hwirq, per_cpu_int_base + + ARMADA_370_XP_INT_SET_MASK_OFFS); +#else + writel(irqd_to_hwirq(d), + per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS); +#endif +} + +static void armada_370_xp_irq_unmask(struct irq_data *d) +{ +#ifdef CONFIG_SMP + irq_hw_number_t hwirq = irqd_to_hwirq(d); + + if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) + writel(hwirq, main_int_base + + ARMADA_370_XP_INT_SET_ENABLE_OFFS); + else + writel(hwirq, per_cpu_int_base + + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); +#else + writel(irqd_to_hwirq(d), + per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); +#endif +} + +#ifdef CONFIG_SMP +static int armada_xp_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, bool force) +{ + unsigned long reg; + unsigned long new_mask = 0; + unsigned long online_mask = 0; + unsigned long count = 0; + irq_hw_number_t hwirq = irqd_to_hwirq(d); + int cpu; + + for_each_cpu(cpu, mask_val) { + new_mask |= 1 << cpu_logical_map(cpu); + count++; + } + + /* + * Forbid mutlicore interrupt affinity + * This is required since the MPIC HW doesn't limit + * several CPUs from acknowledging the same interrupt. 
+ */ + if (count > 1) + return -EINVAL; + + for_each_cpu(cpu, cpu_online_mask) + online_mask |= 1 << cpu_logical_map(cpu); + + raw_spin_lock(&irq_controller_lock); + + reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); + reg = (reg & (~online_mask)) | new_mask; + writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); + + raw_spin_unlock(&irq_controller_lock); + + return 0; +} +#endif + +static struct irq_chip armada_370_xp_irq_chip = { + .name = "armada_370_xp_irq", + .irq_mask = armada_370_xp_irq_mask, + .irq_mask_ack = armada_370_xp_irq_mask, + .irq_unmask = armada_370_xp_irq_unmask, +#ifdef CONFIG_SMP + .irq_set_affinity = armada_xp_set_affinity, +#endif +}; + +static int armada_370_xp_mpic_irq_map(struct irq_domain *h, + unsigned int virq, irq_hw_number_t hw) +{ + armada_370_xp_irq_mask(irq_get_irq_data(virq)); + writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); + irq_set_status_flags(virq, IRQ_LEVEL); + + if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) { + irq_set_percpu_devid(virq); + irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, + handle_percpu_devid_irq); + + } else { + irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, + handle_level_irq); + } + set_irq_flags(virq, IRQF_VALID | IRQF_PROBE); + + return 0; +} + +#ifdef CONFIG_SMP +void armada_mpic_send_doorbell(const struct cpumask *mask, unsigned int irq) +{ + int cpu; + unsigned long map = 0; + + /* Convert our logical CPU mask into a physical one. */ + for_each_cpu(cpu, mask) + map |= 1 << cpu_logical_map(cpu); + + /* + * Ensure that stores to Normal memory are visible to the + * other CPUs before issuing the IPI. + */ + dsb(); + + /* submit softirq */ + writel((map << 8) | irq, main_int_base + + ARMADA_370_XP_SW_TRIG_INT_OFFS); +} + +void armada_xp_mpic_smp_cpu_init(void) +{ + /* Clear pending IPIs */ + writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); + + /* Enable first 8 IPIs */ + writel(IPI_DOORBELL_MASK, per_cpu_int_base + + ARMADA_370_XP_IN_DRBEL_MSK_OFFS); + + /* Unmask IPI interrupt */ + writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); +} +#endif /* CONFIG_SMP */ + +static struct irq_domain_ops armada_370_xp_mpic_irq_ops = { + .map = armada_370_xp_mpic_irq_map, + .xlate = irq_domain_xlate_onecell, +}; + +static asmlinkage void __exception_irq_entry +armada_370_xp_handle_irq(struct pt_regs *regs) +{ + u32 irqstat, irqnr; + + do { + irqstat = readl_relaxed(per_cpu_int_base + + ARMADA_370_XP_CPU_INTACK_OFFS); + irqnr = irqstat & 0x3FF; + + if (irqnr > 1022) + break; + + if (irqnr > 0) { + irqnr = irq_find_mapping(armada_370_xp_mpic_domain, + irqnr); + handle_IRQ(irqnr, regs); + continue; + } +#ifdef CONFIG_SMP + /* IPI Handling */ + if (irqnr == 0) { + u32 ipimask, ipinr; + + ipimask = readl_relaxed(per_cpu_int_base + + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS) + & IPI_DOORBELL_MASK; + + writel(~IPI_DOORBELL_MASK, per_cpu_int_base + + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); + + /* Handle all pending doorbells */ + for (ipinr = IPI_DOORBELL_START; + ipinr < IPI_DOORBELL_END; ipinr++) { + if (ipimask & (0x1 << ipinr)) + handle_IPI(ipinr, regs); + } + continue; + } +#endif + + } while (1); +} + +static int __init armada_370_xp_mpic_of_init(struct device_node *node, + struct device_node *parent) +{ + u32 control; + + main_int_base = of_iomap(node, 0); + per_cpu_int_base = of_iomap(node, 1); + + BUG_ON(!main_int_base); + BUG_ON(!per_cpu_int_base); + + control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL); + + armada_370_xp_mpic_domain = + 
irq_domain_add_linear(node, (control >> 2) & 0x3ff,
+				&armada_370_xp_mpic_irq_ops, NULL);
+
+	if (!armada_370_xp_mpic_domain)
+		panic("Unable to add Armada_370_Xp MPIC irq domain (DT)\n");
+
+	irq_set_default_host(armada_370_xp_mpic_domain);
+
+#ifdef CONFIG_SMP
+	armada_xp_mpic_smp_cpu_init();
+
+	/*
+	 * Set the default affinity from all CPUs to the boot cpu.
+	 * This is required since the MPIC doesn't limit several CPUs
+	 * from acknowledging the same interrupt.
+	 */
+	cpumask_clear(irq_default_affinity);
+	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
+
+#endif
+
+	set_handle_irq(armada_370_xp_handle_irq);
+
+	return 0;
+}
+
+IRQCHIP_DECLARE(armada_370_xp_mpic, "marvell,mpic", armada_370_xp_mpic_of_init);