From: Jon Medhurst
Date: Thu, 18 Jul 2013 10:49:27 +0000 (+0100)
Subject: Merge branches 'master-arm-multi_pmu_v2', 'master-config-fragments', 'master-hw-bkpt...
X-Git-Tag: firefly_0821_release~3680^2~237^2
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=a0e1bccdf9f1c4d68ea024e4254dbbd1eff96a4a;p=firefly-linux-kernel-4.4.55.git

Merge branches 'master-arm-multi_pmu_v2', 'master-config-fragments', 'master-hw-bkpt-fix', 'master-misc-patches' and 'master-task-placement-v2-updates' into big-LITTLE-MP-master-v19

Updates:
-------
- Rebased over 3.10 final
- Differences from big-LITTLE-MP-master-v18
  - New Patches:
    - master-config-fragments: 1 new patch
      - "config: Disable priority filtering for HMP Scheduler"
    - master-misc-patches: 1 new patch
      - "mm: make vmstat_update periodic run conditional"
  - New Branches:
    - master-task-placement-v2-updates: 7 patches
      New patches from ARM added in a new topic branch stacked on top of
      master-task-placement-v2-sysfs...
      - Revert "sched: Enable HMP priority filter by default"
      - "HMP: Use unweighted load for hmp migration decisions"
      - "HMP: Select least-loaded CPU when performing HMP Migrations"
      - "HMP: Avoid multiple calls to hmp_domain_min_load in fast path"
      - "HMP: Force new non-kernel tasks onto big CPUs until load stabilises"
      - "sched: Restrict nohz balance kicks to stay in the HMP domain"
      - "HMP: experimental: Force all rt tasks to start on little domain."

Commands used for merge:
-----------------------
$ git checkout -b big-LITTLE-MP-master-v19 v3.10
$ git merge master-arm-multi_pmu_v2 master-config-fragments \
    master-hw-bkpt-fix master-misc-patches master-task-placement-v2 \
    master-task-placement-v2-sysfs master-task-placement-v2-updates
---

a0e1bccdf9f1c4d68ea024e4254dbbd1eff96a4a
diff --cc arch/arm/include/asm/topology.h
index 611edefaeaf1,58b8b84adcd2,58b8b84adcd2,58b8b84adcd2,5692ba11322d..983fa7c153a2
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@@@@@ -26,8 -26,7 -26,7 -26,7 -26,38 +26,39 @@@@@@@ extern struct cputopo_arm cpu_topology[
 void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);
++++int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask);
+++
++++ #ifdef CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE
++++ /* Common values for CPUs */
++++ #ifndef SD_CPU_INIT
++++ #define SD_CPU_INIT (struct sched_domain) { \
++++         .min_interval = 1, \
++++         .max_interval = 4, \
++++         .busy_factor = 64, \
++++         .imbalance_pct = 125, \
++++         .cache_nice_tries = 1, \
++++         .busy_idx = 2, \
++++         .idle_idx = 1, \
++++         .newidle_idx = 0, \
++++         .wake_idx = 0, \
++++         .forkexec_idx = 0, \
++++         \
++++         .flags = 0*SD_LOAD_BALANCE \
++++                 | 1*SD_BALANCE_NEWIDLE \
++++                 | 1*SD_BALANCE_EXEC \
++++                 | 1*SD_BALANCE_FORK \
++++                 | 0*SD_BALANCE_WAKE \
++++                 | 1*SD_WAKE_AFFINE \
++++                 | 0*SD_SHARE_CPUPOWER \
++++                 | 0*SD_SHARE_PKG_RESOURCES \
++++                 | 0*SD_SERIALIZE \
++++                 , \
++++         .last_balance = jiffies, \
++++         .balance_interval = 1, \
++++ }
++++ #endif
++++ #endif /* CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE */
+
 #else

 static inline void init_cpu_topology(void) { }
diff --cc arch/arm/kernel/topology.c
index e9ac11c85922,c5a59546a256,c5a59546a256,c5a59546a256,4459c0b4e915..677da58d9e88
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@@@@@ -289,33 -289,6 -289,6 -289,6 -290,113 +290,140 @@@@@@@ void store_cpu_topology(unsigned int cp
                 cpu_topology[cpuid].socket_id, mpidr);
 }
++++
++++ #ifdef CONFIG_SCHED_HMP
++++
++++ static const char * const little_cores[] = {
++++         "arm,cortex-a7",
++++         NULL,
++++ };
++++
++++ static bool is_little_cpu(struct device_node *cn)
++++ {
++++         const char * const *lc;
++++         for (lc = little_cores; *lc; lc++)
++++                 if (of_device_is_compatible(cn, *lc))
++++                         return true;
++++         return false;
++++ }
++++
++++ void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
++++                                         struct cpumask *slow)
++++ {
++++         struct device_node *cn = NULL;
++++         int cpu;
++++
++++         cpumask_clear(fast);
++++         cpumask_clear(slow);
++++
++++         /*
++++          * Use the config options if they are given. This helps testing
++++          * HMP scheduling on systems without a big.LITTLE architecture.
++++          */
++++         if (strlen(CONFIG_HMP_FAST_CPU_MASK) && strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
++++                 if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, fast))
++++                         WARN(1, "Failed to parse HMP fast cpu mask!\n");
++++                 if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, slow))
++++                         WARN(1, "Failed to parse HMP slow cpu mask!\n");
++++                 return;
++++         }
++++
++++         /*
++++          * Else, parse device tree for little cores.
++++          */
++++         while ((cn = of_find_node_by_type(cn, "cpu"))) {
++++
++++                 const u32 *mpidr;
++++                 int len;
++++
++++                 mpidr = of_get_property(cn, "reg", &len);
++++                 if (!mpidr || len != 4) {
++++                         pr_err("* %s missing reg property\n", cn->full_name);
++++                         continue;
++++                 }
++++
++++                 cpu = get_logical_index(be32_to_cpup(mpidr));
++++                 if (cpu == -EINVAL) {
++++                         pr_err("couldn't get logical index for mpidr %x\n",
++++                                         be32_to_cpup(mpidr));
++++                         break;
++++                 }
++++
++++                 if (is_little_cpu(cn))
++++                         cpumask_set_cpu(cpu, slow);
++++                 else
++++                         cpumask_set_cpu(cpu, fast);
++++         }
++++
++++         if (!cpumask_empty(fast) && !cpumask_empty(slow))
++++                 return;
++++
++++         /*
++++          * We didn't find both big and little cores so let's call all cores
++++          * fast as this will keep the system running, with all cores being
++++          * treated equal.
++++          */
++++         cpumask_setall(fast);
++++         cpumask_clear(slow);
++++ }
++++
++++ struct cpumask hmp_slow_cpu_mask;
++++
++++ void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
++++ {
++++         struct cpumask hmp_fast_cpu_mask;
++++         struct hmp_domain *domain;
++++
++++         arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);
++++
++++         /*
++++          * Initialize hmp_domains
++++          * Must be ordered with respect to compute capacity.
++++          * Fastest domain at head of list.
++++          */
++++         if(!cpumask_empty(&hmp_slow_cpu_mask)) {
++++                 domain = (struct hmp_domain *)
++++                         kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
++++                 cpumask_copy(&domain->possible_cpus, &hmp_slow_cpu_mask);
++++                 cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
++++                 list_add(&domain->hmp_domains, hmp_domains_list);
++++         }
++++         domain = (struct hmp_domain *)
++++                 kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
++++         cpumask_copy(&domain->possible_cpus, &hmp_fast_cpu_mask);
++++         cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
++++         list_add(&domain->hmp_domains, hmp_domains_list);
++++ }
++++ #endif /* CONFIG_SCHED_HMP */
++++
++++
++++/*
++++ * cluster_to_logical_mask - return cpu logical mask of CPUs in a cluster
++++ * @socket_id: cluster HW identifier
++++ * @cluster_mask: the cpumask location to be initialized, modified by the
++++ * function only if return value == 0
++++ *
++++ * Return:
++++ *
++++ * 0 on success
++++ * -EINVAL if cluster_mask is NULL or there is no record matching socket_id
++++ */
++++int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask)
++++{
++++        int cpu;
++++
++++        if (!cluster_mask)
++++                return -EINVAL;
++++
++++        for_each_online_cpu(cpu)
++++                if (socket_id == topology_physical_package_id(cpu)) {
++++                        cpumask_copy(cluster_mask, topology_core_cpumask(cpu));
++++                        return 0;
++++                }
++++
++++        return -EINVAL;
++++}
++++
 /*
  * init_cpu_topology is called at boot when only one cpu is running
  * which prevent simultaneous write access to cpu_topology array