Merge branch 'linux-linaro-lsk-v4.4' into linux-linaro-lsk-v4.4-android
author    Amit Pundir <amit.pundir@linaro.org>
          Tue, 15 Nov 2016 12:46:35 +0000 (18:16 +0530)
committer Amit Pundir <amit.pundir@linaro.org>
          Tue, 15 Nov 2016 13:03:34 +0000 (18:33 +0530)
Conflicts:
* arch/arm64/include/asm/assembler.h
    Pick changes from AOSP Change-Id: I450594dc311b09b6b832b707a9abb357608cc6e4
    ("UPSTREAM: arm64: include alternative handling in dcache_by_line_op").

* drivers/android/binder.c
    Pick changes from LTS commit 14f09e8e7cd8 ("ANDROID: binder: Add strong ref checks"),
    instead of AOSP Change-Id: I66c15b066808f28bd27bfe50fd0e03ff45a09fca
    ("ANDROID: binder: Add strong ref checks").

* drivers/usb/gadget/function/u_ether.c
    Refactor the AOSP high-speed IRQ throttling logic by adding a check
    for the last queued request, as intended by LTS commit
    660c04e8f174 ("usb: gadget: function: u_ether: don't starve tx request queue").
    This fixes up AOSP Change-Id: I26515bfd9bbc8f7af38be7835692143f7093118a
    ("USB: gadget: u_ether: Fix data stall issue in RNDIS tethering mode");
    the resolved logic is sketched below.

Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
28 files changed:
arch/arm/Kconfig
arch/arm64/Kconfig
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/memory.h
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/head.S
arch/arm64/kernel/setup.c
arch/arm64/kernel/vmlinux.lds.S
arch/arm64/mm/proc.S
arch/x86/include/asm/uaccess.h
drivers/android/binder.c
drivers/usb/gadget/function/u_ether.c
include/net/ip.h
include/net/sock.h
include/uapi/linux/rtnetlink.h
kernel/cgroup.c
mm/memcontrol.c
net/ipv4/af_inet.c
net/ipv4/route.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c

diff --combined arch/arm/Kconfig
index f4a214446b803effdef5447ac0b7497b65353a26,9049ac023bee94dd6458b3bf2a4bebcbe9d5f593..625765fb805adbbad99327017d16e894b8265096
@@@ -35,10 -35,10 +35,11 @@@ config AR
        select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
        select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32
        select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32
 +      select HAVE_ARCH_MMAP_RND_BITS if MMU
        select HAVE_ARCH_HARDENED_USERCOPY
        select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
        select HAVE_ARCH_TRACEHOOK
+       select HAVE_ARM_SMCCC if CPU_V7
        select HAVE_BPF_JIT
        select HAVE_CC_STACKPROTECTOR
        select HAVE_CONTEXT_TRACKING
@@@ -310,14 -310,6 +311,14 @@@ config MM
          Select if you want MMU-based virtualised addressing space
          support by paged memory management. If unsure, say 'Y'.
  
 +config ARCH_MMAP_RND_BITS_MIN
 +      default 8
 +
 +config ARCH_MMAP_RND_BITS_MAX
 +      default 14 if PAGE_OFFSET=0x40000000
 +      default 15 if PAGE_OFFSET=0x80000000
 +      default 16
 +
  #
  # The "ARM system type" choice list is ordered alphabetically by option
  # text.  Please add new entries in the option alphabetic order.
@@@ -1432,8 -1424,7 +1433,7 @@@ config BIG_LITTL
  
  config BL_SWITCHER
        bool "big.LITTLE switcher support"
-       depends on BIG_LITTLE && MCPM && HOTPLUG_CPU
-       select ARM_CPU_SUSPEND
+       depends on BIG_LITTLE && MCPM && HOTPLUG_CPU && ARM_GIC
        select CPU_PM
        help
          The big.LITTLE "switcher" provides the core functionality to
@@@ -1491,7 -1482,7 +1491,7 @@@ config HOTPLUG_CP
  
  config ARM_PSCI
        bool "Support for the ARM Power State Coordination Interface (PSCI)"
-       depends on CPU_V7
+       depends on HAVE_ARM_SMCCC
        select ARM_PSCI_FW
        help
          Say Y here if you want Linux to communicate with system firmware
@@@ -1826,15 -1817,6 +1826,15 @@@ config XE
        help
          Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
  
 +config ARM_FLUSH_CONSOLE_ON_RESTART
 +      bool "Force flush the console on restart"
 +      help
 +        If the console is locked while the system is rebooted, the messages
 +        in the temporary logbuffer would not have propagated to all the
 +        console drivers. This option forces the console lock to be
 +        released if it failed to be acquired, which will cause all the
 +        pending messages to be flushed.
 +
  endmenu
  
  menu "Boot options"
@@@ -1863,21 -1845,6 +1863,21 @@@ config DEPRECATED_PARAM_STRUC
          This was deprecated in 2001 and announced to live on for 5 years.
          Some old boot loaders still use this way.
  
 +config BUILD_ARM_APPENDED_DTB_IMAGE
 +      bool "Build a concatenated zImage/dtb by default"
 +      depends on OF
 +      help
 +        Enabling this option will cause a concatenated zImage and list of
 +        DTBs to be built by default (instead of a standalone zImage.)
 +        The image will be built in arch/arm/boot/zImage-dtb
 +
 +config BUILD_ARM_APPENDED_DTB_IMAGE_NAMES
 +      string "Default dtb names"
 +      depends on BUILD_ARM_APPENDED_DTB_IMAGE
 +      help
 +        Space separated list of names of dtbs to append when
 +        building a concatenated zImage-dtb.
 +
  # Compressed boot loader in ROM.  Yes, we really want to ask about
  # TEXT and BSS so we preserve their values in the config files.
  config ZBOOT_ROM_TEXT
@@@ -2174,7 -2141,8 +2174,8 @@@ config ARCH_SUSPEND_POSSIBL
        def_bool y
  
  config ARM_CPU_SUSPEND
-       def_bool PM_SLEEP
+       def_bool PM_SLEEP || BL_SWITCHER || ARM_PSCI_FW
+       depends on ARCH_SUSPEND_POSSIBLE
  
  config ARCH_HIBERNATION_POSSIBLE
        bool
diff --combined arch/arm64/Kconfig
index f4637c624db21170c79536d94e035c75b0e4e00d,3510b01acc8cab9486d1f88bcb4076b6122941e4..2543791ce8c2ea95903a850e794effca827358a4
@@@ -54,8 -54,6 +54,8 @@@ config ARM6
        select HAVE_ARCH_JUMP_LABEL
        select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
        select HAVE_ARCH_KGDB
 +      select HAVE_ARCH_MMAP_RND_BITS
 +      select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
        select HAVE_BPF_JIT
        select SPARSE_IRQ
        select SYSCTL_EXCEPTION_TRACE
        select HAVE_CONTEXT_TRACKING
+       select HAVE_ARM_SMCCC
        help
          ARM 64-bit (AArch64) Linux support.
  
@@@ -113,40 -112,9 +114,40 @@@ config ARCH_PHYS_ADDR_T_64BI
  config MMU
        def_bool y
  
 +config ARCH_MMAP_RND_BITS_MIN
 +       default 14 if ARM64_64K_PAGES
 +       default 16 if ARM64_16K_PAGES
 +       default 18
 +
 +# max bits determined by the following formula:
 +#  VA_BITS - PAGE_SHIFT - 3
 +config ARCH_MMAP_RND_BITS_MAX
 +       default 19 if ARM64_VA_BITS=36
 +       default 24 if ARM64_VA_BITS=39
 +       default 27 if ARM64_VA_BITS=42
 +       default 30 if ARM64_VA_BITS=47
 +       default 29 if ARM64_VA_BITS=48 && ARM64_64K_PAGES
 +       default 31 if ARM64_VA_BITS=48 && ARM64_16K_PAGES
 +       default 33 if ARM64_VA_BITS=48
 +       default 14 if ARM64_64K_PAGES
 +       default 16 if ARM64_16K_PAGES
 +       default 18
 +
 +config ARCH_MMAP_RND_COMPAT_BITS_MIN
 +       default 7 if ARM64_64K_PAGES
 +       default 9 if ARM64_16K_PAGES
 +       default 11
 +
 +config ARCH_MMAP_RND_COMPAT_BITS_MAX
 +       default 16
 +
  config NO_IOPORT_MAP
        def_bool y if !PCI
  
 +config ILLEGAL_POINTER_VALUE
 +      hex
 +      default 0xdead000000000000
 +
  config STACKTRACE_SUPPORT
        def_bool y
  
@@@ -718,14 -686,6 +719,14 @@@ config SETEND_EMULATIO
          If unsure, say Y
  endif
  
 +config ARM64_SW_TTBR0_PAN
 +      bool "Emulate Priviledged Access Never using TTBR0_EL1 switching"
 +      help
 +        Enabling this option prevents the kernel from accessing
 +        user-space memory directly by pointing TTBR0_EL1 to a reserved
 +        zeroed area and reserved ASID. The user access routines
 +        restore the valid TTBR0_EL1 temporarily.
 +
  menu "ARMv8.1 architectural features"
  
  config ARM64_HW_AFDBM
@@@ -815,7 -775,7 +816,7 @@@ config RELOCATABL
  
  config RANDOMIZE_BASE
        bool "Randomize the address of the kernel image"
 -      select ARM64_MODULE_PLTS
 +      select ARM64_MODULE_PLTS if MODULES
        select RELOCATABLE
        help
          Randomizes the virtual address at which the kernel image is
  
  config RANDOMIZE_MODULE_REGION_FULL
        bool "Randomize the module region independently from the core kernel"
 -      depends on RANDOMIZE_BASE
 +      depends on RANDOMIZE_BASE && !DYNAMIC_FTRACE
        default y
        help
          Randomizes the location of the module region without considering the
@@@ -868,23 -828,6 +869,23 @@@ config CMDLIN
          entering them here. As a minimum, you should specify the
          root device (e.g. root=/dev/nfs).
  
 +choice
 +      prompt "Kernel command line type" if CMDLINE != ""
 +      default CMDLINE_FROM_BOOTLOADER
 +
 +config CMDLINE_FROM_BOOTLOADER
 +      bool "Use bootloader kernel arguments if available"
 +      help
 +        Uses the command-line options passed by the boot loader. If
 +        the boot loader doesn't provide any, the default kernel command
 +        string provided in CMDLINE will be used.
 +
 +config CMDLINE_EXTEND
 +      bool "Extend bootloader kernel arguments"
 +      help
 +        The command-line arguments provided by the boot loader will be
 +        appended to the default kernel command string.
 +
  config CMDLINE_FORCE
        bool "Always use the default kernel command string"
        help
          loader passes other arguments to the kernel.
          This is useful if you cannot or don't want to change the
          command-line options your boot loader passes to the kernel.
 +endchoice
  
  config EFI_STUB
        bool
@@@ -925,21 -867,6 +926,21 @@@ config DM
          However, even with this option, the resultant kernel should
          continue to boot on existing non-UEFI platforms.
  
 +config BUILD_ARM64_APPENDED_DTB_IMAGE
 +      bool "Build a concatenated Image.gz/dtb by default"
 +      depends on OF
 +      help
 +        Enabling this option will cause a concatenated Image.gz and list of
 +        DTBs to be built by default (instead of a standalone Image.gz.)
 +        The image will be built in arch/arm64/boot/Image.gz-dtb
 +
 +config BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES
 +      string "Default dtb names"
 +      depends on BUILD_ARM64_APPENDED_DTB_IMAGE
 +      help
 +        Space separated list of names of dtbs to append when
 +        building a concatenated Image.gz-dtb.
 +
  endmenu
  
  menu "Userspace binary formats"
@@@ -975,6 -902,14 +976,14 @@@ menu "Power management options
  
  source "kernel/power/Kconfig"
  
+ config ARCH_HIBERNATION_POSSIBLE
+       def_bool y
+       depends on CPU_PM
+
+ config ARCH_HIBERNATION_HEADER
+       def_bool y
+       depends on HIBERNATION
+
  config ARCH_SUSPEND_POSSIBLE
        def_bool y
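
(As a quick check of the VA_BITS - PAGE_SHIFT - 3 formula noted above the
ARCH_MMAP_RND_BITS_MAX entry: ARM64_VA_BITS=39 with 4K pages gives
39 - 12 - 3 = 24, and 48-bit VA with 64K pages (PAGE_SHIFT=16) gives
48 - 16 - 3 = 29, matching the defaults in the hunk.)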
  
index d4c925ccc7dec8689b2ef891ae2e4ebd501fce94,308d96eaeeaeb0e8fc2f3e54c6a11fb3882a15a6..46ee050ab747c16b1b3ec2c5ba5ee34d0e516f0c
@@@ -34,9 -34,9 +34,9 @@@
  #define ARM64_HAS_UAO                         9
  #define ARM64_ALT_PAN_NOT_UAO                 10
  
- #define ARM64_NCAPS                           11
- #define ARM64_WORKAROUND_CAVIUM_27456         12      
+ #define ARM64_WORKAROUND_CAVIUM_27456         11
+ #define ARM64_HAS_VIRT_HOST_EXTN              12
+ #define ARM64_NCAPS                           13
  
  #ifndef __ASSEMBLY__
  
@@@ -189,12 -189,6 +189,12 @@@ static inline bool system_supports_mixe
        return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
  }
  
 +static inline bool system_uses_ttbr0_pan(void)
 +{
 +      return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
 +              !cpus_have_cap(ARM64_HAS_PAN);
 +}
 +
  #endif /* __ASSEMBLY__ */
  
  #endif
index ba1b3409d7edd1fc349ef474631d1fd263226e78,d776037d199fa4ca0257ba6cf5beda36d001d6e2..ae11e8fdbfd2b29d7e326683cb49cd6a325ef39f
@@@ -71,6 -71,9 +71,9 @@@
  
  #define TASK_UNMAPPED_BASE    (PAGE_ALIGN(TASK_SIZE / 4))
  
+ #define KERNEL_START      _text
+ #define KERNEL_END        _end
  /*
   * The size of the KASAN shadow region. This should be 1/8th of the
   * size of the entire kernel virtual address space.
@@@ -193,11 -196,7 +196,11 @@@ static inline void *phys_to_virt(phys_a
  #define ARCH_PFN_OFFSET               ((unsigned long)PHYS_PFN_OFFSET)
  
  #define virt_to_page(kaddr)   pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 -#define       virt_addr_valid(kaddr)  pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 +#define _virt_addr_valid(kaddr)       pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 +
 +#define _virt_addr_is_linear(kaddr)   (((u64)(kaddr)) >= PAGE_OFFSET)
 +#define virt_addr_valid(kaddr)                (_virt_addr_is_linear(kaddr) && \
 +                                       _virt_addr_valid(kaddr))
  
  #endif
  
index c54384b7e8b23f4490e6ed901a9472dbf803f5bf,2bb17bd556f8dd5114e52b7fec90bf926cd012c7..dac70c160289569782878ac82c3525f37afd71b5
  #include <linux/mm.h>
  #include <linux/dma-mapping.h>
  #include <linux/kvm_host.h>
+ #include <linux/suspend.h>
  #include <asm/thread_info.h>
  #include <asm/memory.h>
  #include <asm/smp_plat.h>
  #include <asm/suspend.h>
  #include <asm/vdso_datapage.h>
  #include <linux/kbuild.h>
+ #include <linux/arm-smccc.h>
  
  int main(void)
  {
@@@ -36,9 -38,6 +38,9 @@@
    DEFINE(TI_FLAGS,            offsetof(struct thread_info, flags));
    DEFINE(TI_PREEMPT,          offsetof(struct thread_info, preempt_count));
    DEFINE(TI_ADDR_LIMIT,               offsetof(struct thread_info, addr_limit));
 +#ifdef CONFIG_ARM64_SW_TTBR0_PAN
 +  DEFINE(TI_TTBR0,            offsetof(struct thread_info, ttbr0));
 +#endif
    DEFINE(TI_TASK,             offsetof(struct thread_info, task));
    DEFINE(TI_CPU,              offsetof(struct thread_info, cpu));
    BLANK();
    DEFINE(CPU_GP_REGS,         offsetof(struct kvm_cpu_context, gp_regs));
    DEFINE(CPU_USER_PT_REGS,    offsetof(struct kvm_regs, regs));
    DEFINE(CPU_FP_REGS,         offsetof(struct kvm_regs, fp_regs));
-   DEFINE(CPU_SP_EL1,          offsetof(struct kvm_regs, sp_el1));
-   DEFINE(CPU_ELR_EL1,         offsetof(struct kvm_regs, elr_el1));
-   DEFINE(CPU_SPSR,            offsetof(struct kvm_regs, spsr));
-   DEFINE(CPU_SYSREGS,         offsetof(struct kvm_cpu_context, sys_regs));
+   DEFINE(VCPU_FPEXC32_EL2,    offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2]));
    DEFINE(VCPU_ESR_EL2,                offsetof(struct kvm_vcpu, arch.fault.esr_el2));
    DEFINE(VCPU_FAR_EL2,                offsetof(struct kvm_vcpu, arch.fault.far_el2));
    DEFINE(VCPU_HPFAR_EL2,      offsetof(struct kvm_vcpu, arch.fault.hpfar_el2));
-   DEFINE(VCPU_DEBUG_FLAGS,    offsetof(struct kvm_vcpu, arch.debug_flags));
-   DEFINE(VCPU_DEBUG_PTR,      offsetof(struct kvm_vcpu, arch.debug_ptr));
-   DEFINE(DEBUG_BCR,           offsetof(struct kvm_guest_debug_arch, dbg_bcr));
-   DEFINE(DEBUG_BVR,           offsetof(struct kvm_guest_debug_arch, dbg_bvr));
-   DEFINE(DEBUG_WCR,           offsetof(struct kvm_guest_debug_arch, dbg_wcr));
-   DEFINE(DEBUG_WVR,           offsetof(struct kvm_guest_debug_arch, dbg_wvr));
-   DEFINE(VCPU_HCR_EL2,                offsetof(struct kvm_vcpu, arch.hcr_el2));
-   DEFINE(VCPU_MDCR_EL2,       offsetof(struct kvm_vcpu, arch.mdcr_el2));
-   DEFINE(VCPU_IRQ_LINES,      offsetof(struct kvm_vcpu, arch.irq_lines));
    DEFINE(VCPU_HOST_CONTEXT,   offsetof(struct kvm_vcpu, arch.host_cpu_context));
-   DEFINE(VCPU_HOST_DEBUG_STATE, offsetof(struct kvm_vcpu, arch.host_debug_state));
-   DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
-   DEFINE(VCPU_TIMER_CNTV_CVAL,        offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
-   DEFINE(KVM_TIMER_CNTVOFF,   offsetof(struct kvm, arch.timer.cntvoff));
-   DEFINE(KVM_TIMER_ENABLED,   offsetof(struct kvm, arch.timer.enabled));
-   DEFINE(VCPU_KVM,            offsetof(struct kvm_vcpu, kvm));
-   DEFINE(VCPU_VGIC_CPU,               offsetof(struct kvm_vcpu, arch.vgic_cpu));
-   DEFINE(VGIC_V2_CPU_HCR,     offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
-   DEFINE(VGIC_V2_CPU_VMCR,    offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
-   DEFINE(VGIC_V2_CPU_MISR,    offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
-   DEFINE(VGIC_V2_CPU_EISR,    offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
-   DEFINE(VGIC_V2_CPU_ELRSR,   offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
-   DEFINE(VGIC_V2_CPU_APR,     offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
-   DEFINE(VGIC_V2_CPU_LR,      offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
-   DEFINE(VGIC_V3_CPU_SRE,     offsetof(struct vgic_cpu, vgic_v3.vgic_sre));
-   DEFINE(VGIC_V3_CPU_HCR,     offsetof(struct vgic_cpu, vgic_v3.vgic_hcr));
-   DEFINE(VGIC_V3_CPU_VMCR,    offsetof(struct vgic_cpu, vgic_v3.vgic_vmcr));
-   DEFINE(VGIC_V3_CPU_MISR,    offsetof(struct vgic_cpu, vgic_v3.vgic_misr));
-   DEFINE(VGIC_V3_CPU_EISR,    offsetof(struct vgic_cpu, vgic_v3.vgic_eisr));
-   DEFINE(VGIC_V3_CPU_ELRSR,   offsetof(struct vgic_cpu, vgic_v3.vgic_elrsr));
-   DEFINE(VGIC_V3_CPU_AP0R,    offsetof(struct vgic_cpu, vgic_v3.vgic_ap0r));
-   DEFINE(VGIC_V3_CPU_AP1R,    offsetof(struct vgic_cpu, vgic_v3.vgic_ap1r));
-   DEFINE(VGIC_V3_CPU_LR,      offsetof(struct vgic_cpu, vgic_v3.vgic_lr));
-   DEFINE(VGIC_CPU_NR_LR,      offsetof(struct vgic_cpu, nr_lr));
-   DEFINE(KVM_VTTBR,           offsetof(struct kvm, arch.vttbr));
-   DEFINE(KVM_VGIC_VCTRL,      offsetof(struct kvm, arch.vgic.vctrl_base));
  #endif
  #ifdef CONFIG_CPU_PM
    DEFINE(CPU_SUSPEND_SZ,      sizeof(struct cpu_suspend_ctx));
    DEFINE(CPU_CTX_SP,          offsetof(struct cpu_suspend_ctx, sp));
    DEFINE(MPIDR_HASH_MASK,     offsetof(struct mpidr_hash, mask));
    DEFINE(MPIDR_HASH_SHIFTS,   offsetof(struct mpidr_hash, shift_aff));
-   DEFINE(SLEEP_SAVE_SP_SZ,    sizeof(struct sleep_save_sp));
-   DEFINE(SLEEP_SAVE_SP_PHYS,  offsetof(struct sleep_save_sp, save_ptr_stash_phys));
-   DEFINE(SLEEP_SAVE_SP_VIRT,  offsetof(struct sleep_save_sp, save_ptr_stash));
+   DEFINE(SLEEP_STACK_DATA_SYSTEM_REGS,        offsetof(struct sleep_stack_data, system_regs));
+   DEFINE(SLEEP_STACK_DATA_CALLEE_REGS,        offsetof(struct sleep_stack_data, callee_saved_regs));
  #endif
+   DEFINE(ARM_SMCCC_RES_X0_OFFS,       offsetof(struct arm_smccc_res, a0));
+   DEFINE(ARM_SMCCC_RES_X2_OFFS,       offsetof(struct arm_smccc_res, a2));
+   BLANK();
+   DEFINE(HIBERN_PBE_ORIG,     offsetof(struct pbe, orig_address));
+   DEFINE(HIBERN_PBE_ADDR,     offsetof(struct pbe, address));
+   DEFINE(HIBERN_PBE_NEXT,     offsetof(struct pbe, next));
    return 0;
  }
index 40ee3f2933e78dffe011e4d2e5615f0d42222e53,24ecbeb733ed354f24be2d11fcfb9eba3ddd939c..a0c41dae0d8118d5b9c888299cad0895f0ac9036
@@@ -26,6 -26,7 +26,7 @@@
  #include <asm/cpu_ops.h>
  #include <asm/processor.h>
  #include <asm/sysreg.h>
+ #include <asm/virt.h>
  
  unsigned long elf_hwcap __read_mostly;
  EXPORT_SYMBOL_GPL(elf_hwcap);
@@@ -43,7 -44,6 +44,7 @@@ unsigned int compat_elf_hwcap2 __read_m
  #endif
  
  DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 +EXPORT_SYMBOL(cpu_hwcaps);
  
  #define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
        {                                               \
@@@ -647,6 -647,11 +648,11 @@@ static bool has_no_hw_prefetch(const st
        return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, rv_min, rv_max);
  }
  
+ static bool runs_at_el2(const struct arm64_cpu_capabilities *entry)
+ {
+       return is_kernel_in_hyp_mode();
+ }
  static const struct arm64_cpu_capabilities arm64_features[] = {
        {
                .desc = "GIC system register CPU interface",
                .matches = cpufeature_pan_not_uao,
        },
  #endif /* CONFIG_ARM64_PAN */
+       {
+               .desc = "Virtualization Host Extensions",
+               .capability = ARM64_HAS_VIRT_HOST_EXTN,
+               .matches = runs_at_el2,
+       },
        {},
  };
  
diff --combined arch/arm64/kernel/head.S
index c54df6d8d8fe8e52f7b261c0ca0ed6f2cb1fd21e,029c466eaa4c68cad67da330be9024ccfbbd82b4..8cfd5ab377434b2ccafd5b8d7c4701386faf8a95
@@@ -50,9 -50,6 +50,6 @@@
  #error TEXT_OFFSET must be less than 2MB
  #endif
  
- #define KERNEL_START  _text
- #define KERNEL_END    _end
  /*
   * Kernel startup entry point.
   * ---------------------------
@@@ -321,14 -318,14 +318,14 @@@ __create_page_tables
         * dirty cache lines being evicted.
         */
        mov     x0, x25
 -      add     x1, x26, #SWAPPER_DIR_SIZE
 +      add     x1, x26, #SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
        bl      __inval_cache_range
  
        /*
         * Clear the idmap and swapper page tables.
         */
        mov     x0, x25
 -      add     x6, x26, #SWAPPER_DIR_SIZE
 +      add     x6, x26, #SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
  1:    stp     xzr, xzr, [x0], #16
        stp     xzr, xzr, [x0], #16
        stp     xzr, xzr, [x0], #16
         * tables again to remove any speculatively loaded cache lines.
         */
        mov     x0, x25
 -      add     x1, x26, #SWAPPER_DIR_SIZE
 +      add     x1, x26, #SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
        dmb     sy
        bl      __inval_cache_range
  
@@@ -666,7 -663,7 +663,7 @@@ ENDPROC(__secondary_switched
   * If it isn't, park the CPU
   */
        .section        ".idmap.text", "ax"
- __enable_mmu:
+ ENTRY(__enable_mmu)
        mrs     x18, sctlr_el1                  // preserve old SCTLR_EL1 value
        mrs     x1, ID_AA64MMFR0_EL1
        ubfx    x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
        isb
        bl      __create_page_tables            // recreate kernel mapping
  
 +      tlbi    vmalle1                         // Remove any stale TLB entries
 +      dsb     nsh
 +
        msr     sctlr_el1, x19                  // re-enable the MMU
        isb
        ic      iallu                           // flush instructions fetched
index 6591bf23422b3471fa8fb83d04445aac33dd808d,1e33d967c0ae5ce731a1980e1a41e6d7939ce568..0153c0d8ddb18e9cf94a17db4e230617eaad2b7b
@@@ -175,7 -175,6 +175,6 @@@ static void __init smp_build_mpidr_hash
         */
        if (mpidr_hash_size() > 4 * num_possible_cpus())
                pr_warn("Large number of MPIDR hash buckets detected\n");
-       __flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
  }
  
  static void __init setup_machine_fdt(phys_addr_t dt_phys)
@@@ -202,7 -201,7 +201,7 @@@ static void __init request_standard_res
        struct resource *res;
  
        kernel_code.start   = virt_to_phys(_text);
 -      kernel_code.end     = virt_to_phys(_etext - 1);
 +      kernel_code.end     = virt_to_phys(__init_begin - 1);
        kernel_data.start   = virt_to_phys(_sdata);
        kernel_data.end     = virt_to_phys(_end - 1);
  
@@@ -347,15 -346,6 +346,15 @@@ void __init setup_arch(char **cmdline_p
        smp_init_cpus();
        smp_build_mpidr_hash();
  
 +#ifdef CONFIG_ARM64_SW_TTBR0_PAN
 +      /*
 +       * Make sure init_thread_info.ttbr0 always generates translation
 +       * faults in case uaccess_enable() is inadvertently called by the init
 +       * thread.
 +       */
 +      init_thread_info.ttbr0 = virt_to_phys(empty_zero_page);
 +#endif
 +
  #ifdef CONFIG_VT
  #if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
index 9d25a1058c65d7b22b9ccadf0c2fec1989580beb,623532f44323026b4c2dfa54015f2e5714466ae0..7a5228c7abdd7cc9c8226df887a2ec9e7b4d053d
@@@ -46,6 -46,16 +46,16 @@@ jiffies = jiffies_64
        *(.idmap.text)                                  \
        VMLINUX_SYMBOL(__idmap_text_end) = .;
  
+ #ifdef CONFIG_HIBERNATION
+ #define HIBERNATE_TEXT                                        \
+       . = ALIGN(SZ_4K);                               \
+       VMLINUX_SYMBOL(__hibernate_exit_text_start) = .;\
+       *(.hibernate_exit.text)                         \
+       VMLINUX_SYMBOL(__hibernate_exit_text_end) = .;
+ #else
+ #define HIBERNATE_TEXT
+ #endif
  /*
   * The size of the PE/COFF section that covers the kernel image, which
   * runs from stext to _edata, must be a round multiple of the PE/COFF
@@@ -115,6 -125,7 +125,7 @@@ SECTION
                        KPROBES_TEXT
                        HYPERVISOR_TEXT
                        IDMAP_TEXT
+                       HIBERNATE_TEXT
                        *(.fixup)
                        *(.gnu.warning)
                . = ALIGN(16);
        }
  
        . = ALIGN(SEGMENT_ALIGN);
 -      RO_DATA(PAGE_SIZE)              /* everything from this point to */
 -      EXCEPTION_TABLE(8)              /* _etext will be marked RO NX   */
 +      _etext = .;                     /* End of text section */
 +
 +      RO_DATA(PAGE_SIZE)              /* everything from this point to     */
 +      EXCEPTION_TABLE(8)              /* __init_begin will be marked RO NX */
        NOTES
  
        . = ALIGN(SEGMENT_ALIGN);
 -      _etext = .;                     /* End of text and rodata section */
        __init_begin = .;
  
        INIT_TEXT_SECTION(8)
        swapper_pg_dir = .;
        . += SWAPPER_DIR_SIZE;
  
 +#ifdef CONFIG_ARM64_SW_TTBR0_PAN
 +      reserved_ttbr0 = .;
 +      . += RESERVED_TTBR0_SIZE;
 +#endif
 +
        _end = .;
  
        STABS_DEBUG
@@@ -203,6 -208,10 +214,10 @@@ ASSERT(__hyp_idmap_text_end - (__hyp_id
        "HYP init code too big or misaligned")
  ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
        "ID map text too big or misaligned")
+ #ifdef CONFIG_HIBERNATION
+ ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
+       <= SZ_4K, "Hibernate exit text too big or misaligned")
+ #endif
  
  /*
   * If padding is applied before .head.text, virt<->phys conversions will fail.
diff --combined arch/arm64/mm/proc.S
index 85a542b2157521da47b39e2b2bff0180cf5b665a,5bb61de2320172c806ee58959e3f721b2b243a99..8292784d44c95508c50be40b201454fafc488d2e
@@@ -24,6 -24,7 +24,7 @@@
  #include <asm/asm-offsets.h>
  #include <asm/hwcap.h>
  #include <asm/pgtable.h>
+ #include <asm/pgtable-hwdef.h>
  #include <asm/cpufeature.h>
  #include <asm/alternative.h>
  
@@@ -63,62 -64,50 +64,50 @@@ ENTRY(cpu_do_suspend
        mrs     x2, tpidr_el0
        mrs     x3, tpidrro_el0
        mrs     x4, contextidr_el1
-       mrs     x5, mair_el1
-       mrs     x6, cpacr_el1
-       mrs     x7, ttbr1_el1
-       mrs     x8, tcr_el1
-       mrs     x9, vbar_el1
-       mrs     x10, mdscr_el1
-       mrs     x11, oslsr_el1
-       mrs     x12, sctlr_el1
+       mrs     x5, cpacr_el1
+       mrs     x6, tcr_el1
+       mrs     x7, vbar_el1
+       mrs     x8, mdscr_el1
+       mrs     x9, oslsr_el1
+       mrs     x10, sctlr_el1
        stp     x2, x3, [x0]
-       stp     x4, x5, [x0, #16]
-       stp     x6, x7, [x0, #32]
-       stp     x8, x9, [x0, #48]
-       stp     x10, x11, [x0, #64]
-       str     x12, [x0, #80]
+       stp     x4, xzr, [x0, #16]
+       stp     x5, x6, [x0, #32]
+       stp     x7, x8, [x0, #48]
+       stp     x9, x10, [x0, #64]
        ret
  ENDPROC(cpu_do_suspend)
  
  /**
   * cpu_do_resume - restore CPU register context
   *
-  * x0: Physical address of context pointer
-  * x1: ttbr0_el1 to be restored
-  *
-  * Returns:
-  *    sctlr_el1 value in x0
+  * x0: Address of context pointer
   */
  ENTRY(cpu_do_resume)
-       /*
-        * Invalidate local tlb entries before turning on MMU
-        */
-       tlbi    vmalle1
        ldp     x2, x3, [x0]
        ldp     x4, x5, [x0, #16]
-       ldp     x6, x7, [x0, #32]
-       ldp     x8, x9, [x0, #48]
-       ldp     x10, x11, [x0, #64]
-       ldr     x12, [x0, #80]
+       ldp     x6, x8, [x0, #32]
+       ldp     x9, x10, [x0, #48]
+       ldp     x11, x12, [x0, #64]
        msr     tpidr_el0, x2
        msr     tpidrro_el0, x3
        msr     contextidr_el1, x4
-       msr     mair_el1, x5
        msr     cpacr_el1, x6
-       msr     ttbr0_el1, x1
-       msr     ttbr1_el1, x7
-       tcr_set_idmap_t0sz x8, x7
+       /* Don't change t0sz here, mask those bits when restoring */
+       mrs     x5, tcr_el1
+       bfi     x8, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
        msr     tcr_el1, x8
        msr     vbar_el1, x9
        msr     mdscr_el1, x10
+       msr     sctlr_el1, x12
        /*
         * Restore oslsr_el1 by writing oslar_el1
         */
        ubfx    x11, x11, #1, #1
        msr     oslar_el1, x11
        reset_pmuserenr_el0 x0                  // Disable PMU access from EL0
-       mov     x0, x12
-       dsb     nsh             // Make sure local tlb invalidation completed
        isb
        ret
  ENDPROC(cpu_do_resume)
@@@ -136,8 -125,17 +125,8 @@@ ENTRY(cpu_do_switch_mm
        bfi     x0, x1, #48, #16                // set the ASID
        msr     ttbr0_el1, x0                   // set TTBR0
        isb
 -alternative_if_not ARM64_WORKAROUND_CAVIUM_27456
 +      post_ttbr0_update_workaround
        ret
 -      nop
 -      nop
 -      nop
 -alternative_else
 -      ic      iallu
 -      dsb     nsh
 -      isb
 -      ret
 -alternative_endif
  ENDPROC(cpu_do_switch_mm)
  
        .pushsection ".idmap.text", "ax"
index dbe64f27280e34138dc5163b587579db35d3c768,f0bb7c1f7d199413abc9280a19150912096d6aa9..7402eb4b509d3e601df3a254557fc134eaa1cc43
@@@ -339,7 -339,7 +339,7 @@@ do {                                                                       
  #define __get_user_asm_u64(x, ptr, retval, errret) \
         __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
  #define __get_user_asm_ex_u64(x, ptr) \
-        __get_user_asm_ex(x, ptr, "q", "", "=r")
+        __get_user_asm_ex(x, ptr, "q", "", "=&r")
  #endif
  
  #define __get_user_size(x, ptr, size, retval, errret)                 \
@@@ -386,13 -386,13 +386,13 @@@ do {                                                                    
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
-               __get_user_asm_ex(x, ptr, "b", "b", "=q");              \
+               __get_user_asm_ex(x, ptr, "b", "b", "=&q");             \
                break;                                                  \
        case 2:                                                         \
-               __get_user_asm_ex(x, ptr, "w", "w", "=r");              \
+               __get_user_asm_ex(x, ptr, "w", "w", "=&r");             \
                break;                                                  \
        case 4:                                                         \
-               __get_user_asm_ex(x, ptr, "l", "k", "=r");              \
+               __get_user_asm_ex(x, ptr, "l", "k", "=&r");             \
                break;                                                  \
        case 8:                                                         \
                __get_user_asm_ex_u64(x, ptr);                          \
        asm volatile("1:        mov"itype" %1,%"rtype"0\n"              \
                     "2:\n"                                             \
                     _ASM_EXTABLE_EX(1b, 2b)                            \
-                    : ltype(x) : "m" (__m(addr)))
+                    : ltype(x) : "m" (__m(addr)), "0" (0))
  
  #define __put_user_nocheck(x, ptr, size)                      \
  ({                                                            \
@@@ -706,7 -706,7 +706,7 @@@ __copy_from_user_overflow(int size, uns
  
  #endif
  
 -static inline unsigned long __must_check
 +static __always_inline unsigned long __must_check
  copy_from_user(void *to, const void __user *from, unsigned long n)
  {
        int sz = __compiletime_object_size(to);
        return n;
  }
  
 -static inline unsigned long __must_check
 +static __always_inline unsigned long __must_check
  copy_to_user(void __user *to, const void *from, unsigned long n)
  {
        int sz = __compiletime_object_size(from);
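
(Aside on the "=&r" constraints in the uaccess hunks above: the "&"
earlyclobber tells the compiler the asm may write the output before it
has finished reading its inputs, so the output must not share a register
with an input. A minimal, hypothetical illustration of the hazard, not
the kernel macro itself:

        static inline int checked_load(const int *p)
        {
                int sum;

                /* %0 is written by the movl before the addl reads %1;
                 * without "&", GCC could assign both operands the same
                 * register and the load would use a clobbered pointer.
                 */
                asm ("movl $0, %0\n\t"
                     "addl (%1), %0"
                     : "=&r" (sum)
                     : "r" (p));
                return sum;
        }

The added "0" (0) input in the _ASM_EXTABLE_EX variant likewise
pre-initializes the output register to zero, so the exception fixup path
never leaves it undefined.)
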
diff --combined drivers/android/binder.c
index bcf0a9420619fa34a1dd7c17088b846693ae197d,47ddfefe24431b1274cbac8f1eb1318a8d6e7a08..363db309ef79d54322628720e0c0e3e370575c60
@@@ -1003,7 -1003,7 +1003,7 @@@ static int binder_dec_node(struct binde
  
  
  static struct binder_ref *binder_get_ref(struct binder_proc *proc,
-                                        uint32_t desc, bool need_strong_ref)
+                                        u32 desc, bool need_strong_ref)
  {
        struct rb_node *n = proc->refs_by_desc.rb_node;
        struct binder_ref *ref;
@@@ -1290,8 -1290,10 +1290,10 @@@ static void binder_transaction_buffer_r
                } break;
                case BINDER_TYPE_HANDLE:
                case BINDER_TYPE_WEAK_HANDLE: {
-                       struct binder_ref *ref = binder_get_ref(proc, fp->handle,
-                                               fp->type == BINDER_TYPE_HANDLE);
+                       struct binder_ref *ref;
+                       ref = binder_get_ref(proc, fp->handle,
+                                            fp->type == BINDER_TYPE_HANDLE);
  
                        if (ref == NULL) {
                                pr_err("transaction release %d bad handle %d\n",
@@@ -1326,7 -1328,6 +1328,7 @@@ static void binder_transaction(struct b
        struct binder_transaction *t;
        struct binder_work *tcomplete;
        binder_size_t *offp, *off_end;
 +      binder_size_t off_min;
        struct binder_proc *target_proc;
        struct binder_thread *target_thread = NULL;
        struct binder_node *target_node = NULL;
                goto err_bad_offset;
        }
        off_end = (void *)offp + tr->offsets_size;
 +      off_min = 0;
        for (; offp < off_end; offp++) {
                struct flat_binder_object *fp;
  
                if (*offp > t->buffer->data_size - sizeof(*fp) ||
 +                  *offp < off_min ||
                    t->buffer->data_size < sizeof(*fp) ||
                    !IS_ALIGNED(*offp, sizeof(u32))) {
 -                      binder_user_error("%d:%d got transaction with invalid offset, %lld\n",
 -                                        proc->pid, thread->pid, (u64)*offp);
 +                      binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
 +                                        proc->pid, thread->pid, (u64)*offp,
 +                                        (u64)off_min,
 +                                        (u64)(t->buffer->data_size -
 +                                        sizeof(*fp)));
                        return_error = BR_FAILED_REPLY;
                        goto err_bad_offset;
                }
                fp = (struct flat_binder_object *)(t->buffer->data + *offp);
 +              off_min = *offp + sizeof(struct flat_binder_object);
                switch (fp->type) {
                case BINDER_TYPE_BINDER:
                case BINDER_TYPE_WEAK_BINDER: {
                } break;
                case BINDER_TYPE_HANDLE:
                case BINDER_TYPE_WEAK_HANDLE: {
-                       struct binder_ref *ref = binder_get_ref(proc, fp->handle,
-                                               fp->type == BINDER_TYPE_HANDLE);
+                       struct binder_ref *ref;
+                       ref = binder_get_ref(proc, fp->handle,
+                                            fp->type == BINDER_TYPE_HANDLE);
  
                        if (ref == NULL) {
                                binder_user_error("%d:%d got transaction with invalid handle, %d\n",
@@@ -3613,24 -3610,13 +3617,24 @@@ static int binder_transactions_show(str
  
  static int binder_proc_show(struct seq_file *m, void *unused)
  {
 +      struct binder_proc *itr;
        struct binder_proc *proc = m->private;
        int do_lock = !binder_debug_no_lock;
 +      bool valid_proc = false;
  
        if (do_lock)
                binder_lock(__func__);
 -      seq_puts(m, "binder proc state:\n");
 -      print_binder_proc(m, proc, 1);
 +
 +      hlist_for_each_entry(itr, &binder_procs, proc_node) {
 +              if (itr == proc) {
 +                      valid_proc = true;
 +                      break;
 +              }
 +      }
 +      if (valid_proc) {
 +              seq_puts(m, "binder proc state:\n");
 +              print_binder_proc(m, proc, 1);
 +      }
        if (do_lock)
                binder_unlock(__func__);
        return 0;
index 74e9f5b5a45dd64d3cd4c3934d06a1637f1af6ef,b644248f4b8e31532e73b93bce50e7d14af01b58..e4920e5e1d647ff92c5118545309f1eb99396c36
@@@ -53,8 -53,6 +53,8 @@@
   * blocks and still have efficient handling. */
  #define GETHER_MAX_ETH_FRAME_LEN 15412
  
 +static struct workqueue_struct        *uether_wq;
 +
  struct eth_dev {
        /* lock is held while accessing port_usb
         */
  
        spinlock_t              req_lock;       /* guard {rx,tx}_reqs */
        struct list_head        tx_reqs, rx_reqs;
 -      atomic_t                tx_qlen;
 +      unsigned                tx_qlen;
 +/* Minimum number of TX USB request queued to UDC */
 +#define TX_REQ_THRESHOLD      5
 +      int                     no_tx_req_used;
 +      int                     tx_skb_hold_count;
 +      u32                     tx_req_bufsize;
  
        struct sk_buff_head     rx_frames;
  
        unsigned                qmult;
  
        unsigned                header_len;
 +      unsigned                ul_max_pkts_per_xfer;
 +      unsigned                dl_max_pkts_per_xfer;
        struct sk_buff          *(*wrap)(struct gether *, struct sk_buff *skb);
        int                     (*unwrap)(struct gether *,
                                                struct sk_buff *skb,
                                                struct sk_buff_head *list);
  
        struct work_struct      work;
 +      struct work_struct      rx_work;
  
        unsigned long           todo;
  #define       WORK_RX_MEMORY          0
@@@ -240,13 -230,9 +240,13 @@@ rx_submit(struct eth_dev *dev, struct u
        size += out->maxpacket - 1;
        size -= size % out->maxpacket;
  
 +      if (dev->ul_max_pkts_per_xfer)
 +              size *= dev->ul_max_pkts_per_xfer;
 +
        if (dev->port_usb->is_fixed)
                size = max_t(size_t, size, dev->port_usb->fixed_out_len);
  
 +      DBG(dev, "%s: size: %zd\n", __func__, size);
        skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
        if (skb == NULL) {
                DBG(dev, "no rx skb\n");
@@@ -272,16 -258,18 +272,16 @@@ enomem
                DBG(dev, "rx submit --> %d\n", retval);
                if (skb)
                        dev_kfree_skb_any(skb);
 -              spin_lock_irqsave(&dev->req_lock, flags);
 -              list_add(&req->list, &dev->rx_reqs);
 -              spin_unlock_irqrestore(&dev->req_lock, flags);
        }
        return retval;
  }
  
  static void rx_complete(struct usb_ep *ep, struct usb_request *req)
  {
 -      struct sk_buff  *skb = req->context, *skb2;
 +      struct sk_buff  *skb = req->context;
        struct eth_dev  *dev = ep->driver_data;
        int             status = req->status;
 +      bool            queue = 0;
  
        switch (status) {
  
                                status = dev->unwrap(dev->port_usb,
                                                        skb,
                                                        &dev->rx_frames);
 +                              if (status == -EINVAL)
 +                                      dev->net->stats.rx_errors++;
 +                              else if (status == -EOVERFLOW)
 +                                      dev->net->stats.rx_over_errors++;
                        } else {
                                dev_kfree_skb_any(skb);
                                status = -ENOTCONN;
                } else {
                        skb_queue_tail(&dev->rx_frames, skb);
                }
 -              skb = NULL;
 -
 -              skb2 = skb_dequeue(&dev->rx_frames);
 -              while (skb2) {
 -                      if (status < 0
 -                                      || ETH_HLEN > skb2->len
 -                                      || skb2->len > GETHER_MAX_ETH_FRAME_LEN) {
 -                              dev->net->stats.rx_errors++;
 -                              dev->net->stats.rx_length_errors++;
 -                              DBG(dev, "rx length %d\n", skb2->len);
 -                              dev_kfree_skb_any(skb2);
 -                              goto next_frame;
 -                      }
 -                      skb2->protocol = eth_type_trans(skb2, dev->net);
 -                      dev->net->stats.rx_packets++;
 -                      dev->net->stats.rx_bytes += skb2->len;
 -
 -                      /* no buffer copies needed, unless hardware can't
 -                       * use skb buffers.
 -                       */
 -                      status = netif_rx(skb2);
 -next_frame:
 -                      skb2 = skb_dequeue(&dev->rx_frames);
 -              }
 +              if (!status)
 +                      queue = 1;
                break;
  
        /* software-driven interface shutdown */
@@@ -333,20 -339,22 +333,20 @@@ quiesce
                /* FALLTHROUGH */
  
        default:
 +              queue = 1;
 +              dev_kfree_skb_any(skb);
                dev->net->stats.rx_errors++;
                DBG(dev, "rx status %d\n", status);
                break;
        }
  
 -      if (skb)
 -              dev_kfree_skb_any(skb);
 -      if (!netif_running(dev->net)) {
  clean:
 -              spin_lock(&dev->req_lock);
 -              list_add(&req->list, &dev->rx_reqs);
 -              spin_unlock(&dev->req_lock);
 -              req = NULL;
 -      }
 -      if (req)
 -              rx_submit(dev, req, GFP_ATOMIC);
 +      spin_lock(&dev->req_lock);
 +      list_add(&req->list, &dev->rx_reqs);
 +      spin_unlock(&dev->req_lock);
 +
 +      if (queue)
 +              queue_work(uether_wq, &dev->rx_work);
  }
  
  static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
@@@ -411,24 -419,16 +411,24 @@@ static void rx_fill(struct eth_dev *dev
  {
        struct usb_request      *req;
        unsigned long           flags;
 +      int                     req_cnt = 0;
  
        /* fill unused rxq slots with some skb */
        spin_lock_irqsave(&dev->req_lock, flags);
        while (!list_empty(&dev->rx_reqs)) {
 +              /* break the nexus of continuous completion and re-submission*/
 +              if (++req_cnt > qlen(dev->gadget, dev->qmult))
 +                      break;
 +
                req = container_of(dev->rx_reqs.next,
                                struct usb_request, list);
                list_del_init(&req->list);
                spin_unlock_irqrestore(&dev->req_lock, flags);
  
                if (rx_submit(dev, req, gfp_flags) < 0) {
 +                      spin_lock_irqsave(&dev->req_lock, flags);
 +                      list_add(&req->list, &dev->rx_reqs);
 +                      spin_unlock_irqrestore(&dev->req_lock, flags);
                        defer_kevent(dev, WORK_RX_MEMORY);
                        return;
                }
        spin_unlock_irqrestore(&dev->req_lock, flags);
  }
  
 +static void process_rx_w(struct work_struct *work)
 +{
 +      struct eth_dev  *dev = container_of(work, struct eth_dev, rx_work);
 +      struct sk_buff  *skb;
 +      int             status = 0;
 +
 +      if (!dev->port_usb)
 +              return;
 +
 +      while ((skb = skb_dequeue(&dev->rx_frames))) {
 +              if (status < 0
 +                              || ETH_HLEN > skb->len
 +                              || skb->len > ETH_FRAME_LEN) {
 +                      dev->net->stats.rx_errors++;
 +                      dev->net->stats.rx_length_errors++;
 +                      DBG(dev, "rx length %d\n", skb->len);
 +                      dev_kfree_skb_any(skb);
 +                      continue;
 +              }
 +              skb->protocol = eth_type_trans(skb, dev->net);
 +              dev->net->stats.rx_packets++;
 +              dev->net->stats.rx_bytes += skb->len;
 +
 +              status = netif_rx_ni(skb);
 +      }
 +
 +      if (netif_running(dev->net))
 +              rx_fill(dev, GFP_KERNEL);
 +}
 +
  static void eth_work(struct work_struct *work)
  {
        struct eth_dev  *dev = container_of(work, struct eth_dev, work);
@@@ -485,11 -455,6 +485,11 @@@ static void tx_complete(struct usb_ep *
  {
        struct sk_buff  *skb = req->context;
        struct eth_dev  *dev = ep->driver_data;
 +      struct net_device *net = dev->net;
 +      struct usb_request *new_req;
 +      struct usb_ep *in;
 +      int length;
 +      int retval;
  
        switch (req->status) {
        default:
        case -ESHUTDOWN:                /* disconnect etc */
                break;
        case 0:
 -              dev->net->stats.tx_bytes += skb->len;
 +              if (!req->zero)
 +                      dev->net->stats.tx_bytes += req->length-1;
 +              else
 +                      dev->net->stats.tx_bytes += req->length;
        }
        dev->net->stats.tx_packets++;
  
        spin_lock(&dev->req_lock);
 -      list_add(&req->list, &dev->tx_reqs);
 -      spin_unlock(&dev->req_lock);
 -      dev_kfree_skb_any(skb);
 +      list_add_tail(&req->list, &dev->tx_reqs);
 +
 +      if (dev->port_usb->multi_pkt_xfer) {
 +              dev->no_tx_req_used--;
 +              req->length = 0;
 +              in = dev->port_usb->in_ep;
 +
 +              if (!list_empty(&dev->tx_reqs)) {
 +                      new_req = container_of(dev->tx_reqs.next,
 +                                      struct usb_request, list);
 +                      list_del(&new_req->list);
 +                      spin_unlock(&dev->req_lock);
 +                      if (new_req->length > 0) {
 +                              length = new_req->length;
 +
 +                              /* NCM requires no zlp if transfer is
 +                               * dwNtbInMaxSize */
 +                              if (dev->port_usb->is_fixed &&
 +                                      length == dev->port_usb->fixed_in_len &&
 +                                      (length % in->maxpacket) == 0)
 +                                      new_req->zero = 0;
 +                              else
 +                                      new_req->zero = 1;
 +
 +                              /* use zlp framing on tx for strict CDC-Ether
 +                               * conformance, though any robust network rx
 +                               * path ignores extra padding. and some hardware
 +                               * doesn't like to write zlps.
 +                               */
 +                              if (new_req->zero && !dev->zlp &&
 +                                              (length % in->maxpacket) == 0) {
 +                                      new_req->zero = 0;
 +                                      length++;
 +                              }
 +
 +                              new_req->length = length;
 +                              retval = usb_ep_queue(in, new_req, GFP_ATOMIC);
 +                              switch (retval) {
 +                              default:
 +                                      DBG(dev, "tx queue err %d\n", retval);
 +                                      break;
 +                              case 0:
 +                                      spin_lock(&dev->req_lock);
 +                                      dev->no_tx_req_used++;
 +                                      spin_unlock(&dev->req_lock);
 +                                      net->trans_start = jiffies;
 +                              }
 +                      } else {
 +                              spin_lock(&dev->req_lock);
 +                              list_add(&new_req->list, &dev->tx_reqs);
 +                              spin_unlock(&dev->req_lock);
 +                      }
 +              } else {
 +                      spin_unlock(&dev->req_lock);
 +              }
 +      } else {
 +              spin_unlock(&dev->req_lock);
 +              dev_kfree_skb_any(skb);
 +      }
  
 -      atomic_dec(&dev->tx_qlen);
        if (netif_carrier_ok(dev->net))
                netif_wake_queue(dev->net);
  }
@@@ -577,26 -484,6 +577,26 @@@ static inline int is_promisc(u16 cdc_fi
        return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
  }
  
 +static void alloc_tx_buffer(struct eth_dev *dev)
 +{
 +      struct list_head        *act;
 +      struct usb_request      *req;
 +
 +      dev->tx_req_bufsize = (dev->dl_max_pkts_per_xfer *
 +                              (dev->net->mtu
 +                              + sizeof(struct ethhdr)
 +                              /* size of rndis_packet_msg_type */
 +                              + 44
 +                              + 22));
 +
 +      list_for_each(act, &dev->tx_reqs) {
 +              req = container_of(act, struct usb_request, list);
 +              if (!req->buf)
 +                      req->buf = kmalloc(dev->tx_req_bufsize,
 +                                              GFP_ATOMIC);
 +      }
 +}
 +
  static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
                                        struct net_device *net)
  {
                return NETDEV_TX_OK;
        }
  
 +      /* Allocate memory for tx_reqs to support multi packet transfer */
 +      if (dev->port_usb->multi_pkt_xfer && !dev->tx_req_bufsize)
 +              alloc_tx_buffer(dev);
 +
        /* apply outgoing CDC or RNDIS filters */
        if (skb && !is_promisc(cdc_filter)) {
                u8              *dest = skb->data;
                }
        }
  
 -      length = skb->len;
 -      req->buf = skb->data;
 -      req->context = skb;
 +      spin_lock_irqsave(&dev->req_lock, flags);
 +      dev->tx_skb_hold_count++;
 +      spin_unlock_irqrestore(&dev->req_lock, flags);
 +
 +      if (dev->port_usb->multi_pkt_xfer) {
 +              memcpy(req->buf + req->length, skb->data, skb->len);
 +              req->length = req->length + skb->len;
 +              length = req->length;
 +              dev_kfree_skb_any(skb);
 +
 +              spin_lock_irqsave(&dev->req_lock, flags);
 +              if (dev->tx_skb_hold_count < dev->dl_max_pkts_per_xfer) {
 +                      if (dev->no_tx_req_used > TX_REQ_THRESHOLD) {
 +                              list_add(&req->list, &dev->tx_reqs);
 +                              spin_unlock_irqrestore(&dev->req_lock, flags);
 +                              goto success;
 +                      }
 +              }
 +
 +              dev->no_tx_req_used++;
 +              spin_unlock_irqrestore(&dev->req_lock, flags);
 +
 +              spin_lock_irqsave(&dev->lock, flags);
 +              dev->tx_skb_hold_count = 0;
 +              spin_unlock_irqrestore(&dev->lock, flags);
 +      } else {
 +              length = skb->len;
 +              req->buf = skb->data;
 +              req->context = skb;
 +      }
 +
        req->complete = tx_complete;
  
        /* NCM requires no zlp if transfer is dwNtbInMaxSize */
         * though any robust network rx path ignores extra padding.
         * and some hardware doesn't like to write zlps.
         */
 -      if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
 +      if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) {
 +              req->zero = 0;
                length++;
 +      }
  
        req->length = length;
  
 -      /* throttle high/super speed IRQ rate back slightly */
 -      if (gadget_is_dualspeed(dev->gadget))
 -              req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH ||
 -                                     dev->gadget->speed == USB_SPEED_SUPER)) &&
 -                                      !list_empty(&dev->tx_reqs))
 -                      ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
 -                      : 0;
 +      /* throttle highspeed IRQ rate back slightly */
 +      if (gadget_is_dualspeed(dev->gadget) &&
-                        (dev->gadget->speed == USB_SPEED_HIGH)) {
++                       (dev->gadget->speed == USB_SPEED_HIGH) &&
++                       !list_empty(&dev->tx_reqs)) {
 +              dev->tx_qlen++;
 +              if (dev->tx_qlen == (dev->qmult/2)) {
 +                      req->no_interrupt = 0;
 +                      dev->tx_qlen = 0;
 +              } else {
 +                      req->no_interrupt = 1;
 +              }
 +      } else {
 +              req->no_interrupt = 0;
 +      }
  
        retval = usb_ep_queue(in, req, GFP_ATOMIC);
        switch (retval) {
                break;
        case 0:
                net->trans_start = jiffies;
 -              atomic_inc(&dev->tx_qlen);
        }
  
        if (retval) {
 -              dev_kfree_skb_any(skb);
 +              if (!dev->port_usb->multi_pkt_xfer)
 +                      dev_kfree_skb_any(skb);
  drop:
                dev->net->stats.tx_dropped++;
  multiframe:
                list_add(&req->list, &dev->tx_reqs);
                spin_unlock_irqrestore(&dev->req_lock, flags);
        }
 +success:
        return NETDEV_TX_OK;
  }
  
@@@ -790,7 -636,7 +791,7 @@@ static void eth_start(struct eth_dev *d
        rx_fill(dev, gfp_flags);
  
        /* and open the tx floodgates */
 -      atomic_set(&dev->tx_qlen, 0);
 +      dev->tx_qlen = 0;
        netif_wake_queue(dev->net);
  }
  
@@@ -936,7 -782,6 +937,7 @@@ struct eth_dev *gether_setup_name(struc
        spin_lock_init(&dev->lock);
        spin_lock_init(&dev->req_lock);
        INIT_WORK(&dev->work, eth_work);
 +      INIT_WORK(&dev->rx_work, process_rx_w);
        INIT_LIST_HEAD(&dev->tx_reqs);
        INIT_LIST_HEAD(&dev->rx_reqs);
  
@@@ -999,7 -844,6 +1000,7 @@@ struct net_device *gether_setup_name_de
        spin_lock_init(&dev->lock);
        spin_lock_init(&dev->req_lock);
        INIT_WORK(&dev->work, eth_work);
 +      INIT_WORK(&dev->rx_work, process_rx_w);
        INIT_LIST_HEAD(&dev->tx_reqs);
        INIT_LIST_HEAD(&dev->rx_reqs);
  
@@@ -1234,13 -1078,8 +1235,13 @@@ struct net_device *gether_connect(struc
                dev->header_len = link->header_len;
                dev->unwrap = link->unwrap;
                dev->wrap = link->wrap;
 +              dev->ul_max_pkts_per_xfer = link->ul_max_pkts_per_xfer;
 +              dev->dl_max_pkts_per_xfer = link->dl_max_pkts_per_xfer;
  
                spin_lock(&dev->lock);
 +              dev->tx_skb_hold_count = 0;
 +              dev->no_tx_req_used = 0;
 +              dev->tx_req_bufsize = 0;
                dev->port_usb = link;
                if (netif_running(dev->net)) {
                        if (link->open)
@@@ -1285,7 -1124,6 +1286,7 @@@ void gether_disconnect(struct gether *l
  {
        struct eth_dev          *dev = link->ioport;
        struct usb_request      *req;
 +      struct sk_buff          *skb;
  
        WARN_ON(!dev);
        if (!dev)
                list_del(&req->list);
  
                spin_unlock(&dev->req_lock);
 +              if (link->multi_pkt_xfer)
 +                      kfree(req->buf);
                usb_ep_free_request(link->in_ep, req);
                spin_lock(&dev->req_lock);
        }
                spin_lock(&dev->req_lock);
        }
        spin_unlock(&dev->req_lock);
 +
 +      spin_lock(&dev->rx_frames.lock);
 +      while ((skb = __skb_dequeue(&dev->rx_frames)))
 +              dev_kfree_skb_any(skb);
 +      spin_unlock(&dev->rx_frames.lock);
 +
        link->out_ep->desc = NULL;
  
        /* finish forgetting about this USB link episode */
  }
  EXPORT_SYMBOL_GPL(gether_disconnect);
  
 -MODULE_LICENSE("GPL");
 +static int __init gether_init(void)
 +{
 +      uether_wq  = create_singlethread_workqueue("uether");
 +      if (!uether_wq) {
 +              pr_err("%s: Unable to create workqueue: uether\n", __func__);
 +              return -ENOMEM;
 +      }
 +      return 0;
 +}
 +module_init(gether_init);
 +
 +static void __exit gether_exit(void)
 +{
 +      destroy_workqueue(uether_wq);
 +
 +}
 +module_exit(gether_exit);
  MODULE_AUTHOR("David Brownell");
 +MODULE_DESCRIPTION("ethernet over USB driver");
 +MODULE_LICENSE("GPL v2");
diff --combined include/net/ip.h
index 4f3ef345f4c2efa31f2794011589e2540a99ae9c,b450d8653b30c0b838a2a591ad2e5d98b0ea42d3..f78c3a52529bc7885db8a01d5a3d604a99b87128
@@@ -170,7 -170,6 +170,7 @@@ struct ip_reply_arg 
                                /* -1 if not needed */ 
        int         bound_dev_if;
        u8          tos;
 +      kuid_t      uid;
  }; 
  
  #define IP_REPLY_ARG_NOSRCCHECK 1
@@@ -554,7 -553,7 +554,7 @@@ int ip_options_rcv_srr(struct sk_buff *
   */
  
  void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
- void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int offset);
+ void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int tlen, int offset);
  int ip_cmsg_send(struct net *net, struct msghdr *msg,
                 struct ipcm_cookie *ipc, bool allow_ipv6);
  int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
@@@ -576,7 -575,7 +576,7 @@@ void ip_local_error(struct sock *sk, in
  
  static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
  {
-       ip_cmsg_recv_offset(msg, skb, 0);
+       ip_cmsg_recv_offset(msg, skb, 0, 0);
  }
  
  bool icmp_global_allow(void);
diff --combined include/net/sock.h
index 2d663ee8494daa5b3d8968542bc014472d361187,3d5ff7436f41040eb7cbd9ef09f9cc57219b24c3..8f77df63a8f46670d8ff9be2be3b62d4e76032f8
@@@ -1067,7 -1067,6 +1067,7 @@@ struct proto 
        void                    (*destroy_cgroup)(struct mem_cgroup *memcg);
        struct cg_proto         *(*proto_cgroup)(struct mem_cgroup *memcg);
  #endif
 +      int                     (*diag_destroy)(struct sock *sk, int err);
  };
  
  int proto_register(struct proto *prot, int alloc_slab);
@@@ -1426,6 -1425,16 +1426,16 @@@ static inline void sk_mem_uncharge(stru
        if (!sk_has_account(sk))
                return;
        sk->sk_forward_alloc += size;
+       /* Avoid a possible overflow.
+        * TCP send queues can make this happen, if sk_mem_reclaim()
+        * is not called and more than 2 GBytes are released at once.
+        *
+        * If we reach 2 MBytes, reclaim 1 MByte right now; there is
+        * no need to hold that much forward allocation anyway.
+        */
+       if (unlikely(sk->sk_forward_alloc >= 1 << 21))
+               __sk_mem_reclaim(sk, 1 << 20);
  }
  
  static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
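
/*
 * A worked example of the thresholds in sk_mem_uncharge() above, as plain
 * userspace C: forward allocation accumulates until it crosses 2 MB
 * (1 << 21), at which point 1 MB (1 << 20) is reclaimed, so the counter can
 * never creep toward the 32-bit limit no matter how many bytes are released
 * between reclaim calls. Standalone sketch, not kernel code; the subtraction
 * stands in for __sk_mem_reclaim().
 */
#include <stdio.h>

static int forward_alloc;

static void uncharge(int size)
{
        forward_alloc += size;
        if (forward_alloc >= 1 << 21)
                forward_alloc -= 1 << 20;       /* stand-in for __sk_mem_reclaim() */
}

int main(void)
{
        int i;

        for (i = 0; i < 4096; i++)
                uncharge(64 * 1024);            /* release 64 KB at a time */
        printf("forward_alloc stays bounded: %d\n", forward_alloc);
        return 0;
}
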
diff --combined include/uapi/linux/rtnetlink.h
index 3eb02a1d6d8cca26f00e5d5c6399436100b172b8,fa3b3436556027aa587cd6d2042c00ba147ef53a..a2fad11894ffd16e5640ad3501eed3aca4b60438
@@@ -306,7 -306,6 +306,7 @@@ enum rtattr_type_t 
        RTA_TABLE,
        RTA_MARK,
        RTA_MFC_STATS,
 +      RTA_UID,
        RTA_VIA,
        RTA_NEWDST,
        RTA_PREF,
@@@ -344,7 -343,7 +344,7 @@@ struct rtnexthop 
  #define RTNH_F_OFFLOAD                8       /* offloaded route */
  #define RTNH_F_LINKDOWN               16      /* carrier-down on nexthop */
  
- #define RTNH_COMPARE_MASK     (RTNH_F_DEAD | RTNH_F_LINKDOWN)
+ #define RTNH_COMPARE_MASK     (RTNH_F_DEAD | RTNH_F_LINKDOWN | RTNH_F_OFFLOAD)
  
   /* Macros to handle nexthops */
  
diff --combined kernel/cgroup.c
index e4552a3cbf418a666a55a462707aba46b0d66960,127c63e02d52b99d6c566ba50af719bde32f49c7..f48196a7414c0628504daa321f8d6a857eab37d7
@@@ -236,6 -236,9 +236,9 @@@ static int cgroup_addrm_files(struct cg
   */
  static bool cgroup_ssid_enabled(int ssid)
  {
+       if (CGROUP_SUBSYS_COUNT == 0)
+               return false;
        return static_key_enabled(cgroup_subsys_enabled_key[ssid]);
  }
  
@@@ -2685,8 -2688,7 +2688,8 @@@ static int cgroup_procs_write_permissio
         */
        if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
            !uid_eq(cred->euid, tcred->uid) &&
 -          !uid_eq(cred->euid, tcred->suid))
 +          !uid_eq(cred->euid, tcred->suid) &&
 +          !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
                ret = -EACCES;
  
        if (!ret && cgroup_on_dfl(dst_cgrp)) {
@@@ -5326,12 -5328,6 +5329,12 @@@ int __init cgroup_init(void
        BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
        BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));
  
 +      /*
 +       * The latency of the synchronize_sched() is too high for cgroups,
 +       * avoid it at the cost of forcing all readers into the slow path.
 +       */
 +      rcu_sync_enter_start(&cgroup_threadgroup_rwsem.rss);
 +
        mutex_lock(&cgroup_mutex);
  
        /* Add init_css_set to the hash table */
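
/*
 * The cgroup_procs_write_permission() change above widens who may migrate a
 * task: besides root and matching (saved) uids, a writer holding
 * CAP_SYS_RESOURCE in the target task's user namespace is now allowed,
 * presumably so a privileged system daemon can migrate tasks it does not
 * own. Restated as a predicate over the same cred fields; the helper name
 * is hypothetical:
 */
#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/uidgid.h>

static bool may_migrate_task(const struct cred *cred, const struct cred *tcred)
{
        return uid_eq(cred->euid, GLOBAL_ROOT_UID) ||
               uid_eq(cred->euid, tcred->uid) ||
               uid_eq(cred->euid, tcred->suid) ||
               ns_capable(tcred->user_ns, CAP_SYS_RESOURCE);
}
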
diff --combined mm/memcontrol.c
index 1e50d37ee132b3768ecb052486e0dd62f2a0d066,5d9c8a3136bc0404e086487b7b4a87a102a2fe2c..17dfe70f3309778e92d6968ed87b15d4baa7897a
@@@ -2055,6 -2055,15 +2055,15 @@@ retry
                     current->flags & PF_EXITING))
                goto force;
  
+       /*
+        * Prevent unbounded recursion when reclaim operations need to
+        * allocate memory. This might exceed the limits temporarily,
+        * but we prefer facilitating memory reclaim and getting back
+        * under the limit over triggering OOM kills in these cases.
+        */
+       if (unlikely(current->flags & PF_MEMALLOC))
+               goto force;
        if (unlikely(task_in_memcg_oom(current)))
                goto nomem;
  
@@@ -5343,7 -5352,6 +5352,7 @@@ struct cgroup_subsys memory_cgrp_subsy
        .css_reset = mem_cgroup_css_reset,
        .can_attach = mem_cgroup_can_attach,
        .cancel_attach = mem_cgroup_cancel_attach,
 +      .attach = mem_cgroup_move_task,
        .post_attach = mem_cgroup_move_task,
        .bind = mem_cgroup_bind,
        .dfl_cftypes = memory_files,
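
/*
 * Context for the PF_MEMALLOC check added to try_charge() above: reclaim
 * paths mark themselves with PF_MEMALLOC, so an allocation made from inside
 * reclaim can be recognized and sent down the force path instead of
 * recursing back into reclaim. A schematic of the flag's lifecycle,
 * assuming a reclaim entry point; the function name is illustrative:
 */
#include <linux/sched.h>

static void example_reclaim(void)
{
        current->flags |= PF_MEMALLOC;  /* "I am reclaim, don't recurse" */
        /* ... shrink lists; a nested try_charge() now takes goto force ... */
        current->flags &= ~PF_MEMALLOC;
}
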
diff --combined net/ipv4/af_inet.c
index caa6f158a0775d5dd86b6e76198dbca5290c33b0,afc18e9ca94adeb33d6344481839701971815cbc..68bf7bdf7fdb71a7e5abd54e8826e1cfb67e52ec
  #endif
  #include <net/l3mdev.h>
  
 +#ifdef CONFIG_ANDROID_PARANOID_NETWORK
 +#include <linux/android_aid.h>
 +
 +static inline int current_has_network(void)
 +{
 +      return in_egroup_p(AID_INET) || capable(CAP_NET_RAW);
 +}
 +#else
 +static inline int current_has_network(void)
 +{
 +      return 1;
 +}
 +#endif
  
  /* The inetsw table contains everything that inet_create needs to
   * build a new socket.
@@@ -273,9 -260,6 +273,9 @@@ static int inet_create(struct net *net
        if (protocol < 0 || protocol >= IPPROTO_MAX)
                return -EINVAL;
  
 +      if (!current_has_network())
 +              return -EACCES;
 +
        sock->state = SS_UNCONNECTED;
  
        /* Look for the requested type/protocol pair. */
@@@ -324,7 -308,8 +324,7 @@@ lookup_protocol
        }
  
        err = -EPERM;
 -      if (sock->type == SOCK_RAW && !kern &&
 -          !ns_capable(net->user_ns, CAP_NET_RAW))
 +      if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
                goto out_rcu_unlock;
  
        sock->ops = answer->ops;
@@@ -1387,7 -1372,7 +1387,7 @@@ static struct sk_buff **inet_gro_receiv
        skb_gro_pull(skb, sizeof(*iph));
        skb_set_transport_header(skb, skb_gro_offset(skb));
  
-       pp = ops->callbacks.gro_receive(head, skb);
+       pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
  
  out_unlock:
        rcu_read_unlock();
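
/*
 * The call_gro_receive() wrapper used above guards against deeply nested
 * encapsulations by counting recursion per skb and flushing instead of
 * recursing past a fixed limit. Upstream implements the helper roughly
 * along these lines (a sketch; see include/linux/netdevice.h in this tree
 * for the real definition):
 */
#include <linux/netdevice.h>

#define GRO_RECURSION_LIMIT 15

static inline int gro_recursion_inc_test(struct sk_buff *skb)
{
        return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
}

typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);

static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
                                                struct sk_buff **head,
                                                struct sk_buff *skb)
{
        if (unlikely(gro_recursion_inc_test(skb))) {
                NAPI_GRO_CB(skb)->flush |= 1;   /* give up GRO for this skb */
                return NULL;
        }
        return cb(head, skb);
}
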
diff --combined net/ipv4/route.c
index fb54659320d86af6a08355ebe2783ec4be3c97fe,8533a75a932869cc61add56b245b6343d9f4271e..39483512a54180b6bfec09f997f2d40bd123db87
@@@ -501,7 -501,7 +501,7 @@@ void __ip_select_ident(struct net *net
  }
  EXPORT_SYMBOL(__ip_select_ident);
  
 -static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
 +static void __build_flow_key(struct flowi4 *fl4, struct sock *sk,
                             const struct iphdr *iph,
                             int oif, u8 tos,
                             u8 prot, u32 mark, int flow_flags)
        flowi4_init_output(fl4, oif, mark, tos,
                           RT_SCOPE_UNIVERSE, prot,
                           flow_flags,
 -                         iph->daddr, iph->saddr, 0, 0);
 +                         iph->daddr, iph->saddr, 0, 0,
 +                         sk ? sock_i_uid(sk) : GLOBAL_ROOT_UID);
  }
  
  static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
 -                             const struct sock *sk)
 +                             struct sock *sk)
  {
        const struct iphdr *iph = ip_hdr(skb);
        int oif = skb->dev->ifindex;
        __build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0);
  }
  
 -static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
 +static void build_sk_flow_key(struct flowi4 *fl4, struct sock *sk)
  {
        const struct inet_sock *inet = inet_sk(sk);
        const struct ip_options_rcu *inet_opt;
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
                           inet_sk_flowi_flags(sk),
 -                         daddr, inet->inet_saddr, 0, 0);
 +                         daddr, inet->inet_saddr, 0, 0,
 +                         sock_i_uid(sk));
        rcu_read_unlock();
  }
  
 -static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
 +static void ip_rt_build_flow_key(struct flowi4 *fl4, struct sock *sk,
                                 const struct sk_buff *skb)
  {
        if (skb)
@@@ -2486,11 -2484,6 +2486,11 @@@ static int rt_fill_info(struct net *net
            nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
                goto nla_put_failure;
  
 +      if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
 +          nla_put_u32(skb, RTA_UID,
 +                      from_kuid_munged(current_user_ns(), fl4->flowi4_uid)))
 +              goto nla_put_failure;
 +
        error = rt->dst.error;
  
        if (rt_is_input_route(rt)) {
                    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
                        int err = ipmr_get_route(net, skb,
                                                 fl4->saddr, fl4->daddr,
-                                                r, nowait);
+                                                r, nowait, portid);
                        if (err <= 0) {
                                if (!nowait) {
                                        if (err == 0)
@@@ -2542,7 -2536,6 +2543,7 @@@ static int inet_rtm_getroute(struct sk_
        int mark;
        struct sk_buff *skb;
        u32 table_id = RT_TABLE_MAIN;
 +      kuid_t uid;
  
        err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
        if (err < 0)
        dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
        iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
        mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
 +      if (tb[RTA_UID])
 +              uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
 +      else
 +              uid = (iif ? INVALID_UID : current_uid());
  
        memset(&fl4, 0, sizeof(fl4));
        fl4.daddr = dst;
        fl4.flowi4_tos = rtm->rtm_tos;
        fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
        fl4.flowi4_mark = mark;
 +      fl4.flowi4_uid = uid;
  
        if (netif_index_is_l3_master(net, fl4.flowi4_oif))
                fl4.flowi4_flags = FLOWI_FLAG_L3MDEV_SRC | FLOWI_FLAG_SKIP_NH_OIF;
diff --combined net/ipv4/sysctl_net_ipv4.c
index b1784c897e6cdc781036914b709096fc09415e10,70fb352e317fc138ff48067c6c6aa747e3bd3f43..46123369144ffa5e7b4cbfea0355efa867c1d996
@@@ -97,11 -97,11 +97,11 @@@ static void inet_get_ping_group_range_t
                container_of(table->data, struct net, ipv4.ping_group_range.range);
        unsigned int seq;
        do {
-               seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.ping_group_range.lock);
  
                *low = data[0];
                *high = data[1];
-       } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
+       } while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
  }
  
  /* Update system visible IP port range */
@@@ -110,10 -110,10 +110,10 @@@ static void set_ping_group_range(struc
        kgid_t *data = table->data;
        struct net *net =
                container_of(table->data, struct net, ipv4.ping_group_range.range);
-       write_seqlock(&net->ipv4.ip_local_ports.lock);
+       write_seqlock(&net->ipv4.ping_group_range.lock);
        data[0] = low;
        data[1] = high;
-       write_sequnlock(&net->ipv4.ip_local_ports.lock);
+       write_sequnlock(&net->ipv4.ping_group_range.lock);
  }
  
  /* Validate changes from /proc interface. */
@@@ -152,21 -152,6 +152,21 @@@ static int ipv4_ping_group_range(struc
        return ret;
  }
  
 +/* Validate changes from /proc interface. */
 +static int proc_tcp_default_init_rwnd(struct ctl_table *ctl, int write,
 +                                    void __user *buffer,
 +                                    size_t *lenp, loff_t *ppos)
 +{
 +      int old_value = *(int *)ctl->data;
 +      int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
 +      int new_value = *(int *)ctl->data;
 +
 +      if (write && ret == 0 && (new_value < 3 || new_value > 100))
 +              *(int *)ctl->data = old_value;
 +
 +      return ret;
 +}
 +
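
/*
 * The proc_tcp_default_init_rwnd() handler above accepts the write, then
 * silently restores the previous value if the new one falls outside
 * [3, 100]. The same save/validate/restore shape, as a standalone
 * userspace sketch:
 */
#include <stdio.h>

static int tcp_default_init_rwnd = 20;          /* TCP_INIT_CWND * 2 */

static void write_sysctl(int new_value)
{
        int old_value = tcp_default_init_rwnd;

        tcp_default_init_rwnd = new_value;      /* proc_dointvec() stored it */
        if (new_value < 3 || new_value > 100)
                tcp_default_init_rwnd = old_value;
}

int main(void)
{
        write_sysctl(40);
        printf("40 accepted  -> %d\n", tcp_default_init_rwnd);  /* 40 */
        write_sysctl(200);
        printf("200 rejected -> %d\n", tcp_default_init_rwnd);  /* still 40 */
        return 0;
}
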
  static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
                                       void __user *buffer, size_t *lenp, loff_t *ppos)
  {
@@@ -776,13 -761,6 +776,13 @@@ static struct ctl_table ipv4_table[] = 
                .proc_handler   = proc_dointvec_ms_jiffies,
        },
        {
 +              .procname       = "tcp_default_init_rwnd",
 +              .data           = &sysctl_tcp_default_init_rwnd,
 +              .maxlen         = sizeof(int),
 +              .mode           = 0644,
 +              .proc_handler   = proc_tcp_default_init_rwnd
 +      },
 +      {
                .procname       = "icmp_msgs_per_sec",
                .data           = &sysctl_icmp_msgs_per_sec,
                .maxlen         = sizeof(int),
diff --combined net/ipv4/tcp_input.c
index 2bf1110aa2ae74ee04728626334cc464a8cac060,7cc0f8aac28f5de0cab88116071a450bc0ad0d41..35e97ff3054a8de4b4988c5ec70ea0f46fbea20d
@@@ -102,7 -102,6 +102,7 @@@ int sysctl_tcp_thin_dupack __read_mostl
  int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
  int sysctl_tcp_early_retrans __read_mostly = 3;
  int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
 +int sysctl_tcp_default_init_rwnd __read_mostly = TCP_INIT_CWND * 2;
  
  #define FLAG_DATA             0x01 /* Incoming frame contained data.          */
  #define FLAG_WIN_UPDATE               0x02 /* Incoming ACK was a window update.       */
@@@ -2325,10 -2324,9 +2325,9 @@@ static void DBGUNDO(struct sock *sk, co
        }
  #if IS_ENABLED(CONFIG_IPV6)
        else if (sk->sk_family == AF_INET6) {
-               struct ipv6_pinfo *np = inet6_sk(sk);
                pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
                         msg,
-                        &np->daddr, ntohs(inet->inet_dport),
+                        &sk->sk_v6_daddr, ntohs(inet->inet_dport),
                         tp->snd_cwnd, tcp_left_out(tp),
                         tp->snd_ssthresh, tp->prior_ssthresh,
                         tp->packets_out);
diff --combined net/ipv4/tcp_output.c
index 9eb81a4b0da20b55229d488f5e4e8a69e7f282d2,0795647e94c601d47245fd3ea610de8841b92af7..ca3731721d81dac1b73ef94aee51ac2b4e3834e4
@@@ -191,7 -191,7 +191,7 @@@ u32 tcp_default_init_rwnd(u32 mss
         * (RFC 3517, Section 4, NextSeg() rule (2)). Further place a
         * limit when mss is larger than 1460.
         */
 -      u32 init_rwnd = TCP_INIT_CWND * 2;
 +      u32 init_rwnd = sysctl_tcp_default_init_rwnd;
  
        if (mss > 1460)
                init_rwnd = max((1460 * init_rwnd) / mss, 2U);
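
/*
 * Worked numbers for the scaling above: the sysctl value is interpreted in
 * units of 1460-byte segments, so a larger MSS yields a proportionally
 * smaller segment count, never below 2. Standalone check with the default
 * sysctl value of 20:
 */
#include <stdio.h>

static unsigned int default_init_rwnd(unsigned int init_rwnd, unsigned int mss)
{
        if (mss > 1460) {
                init_rwnd = (1460 * init_rwnd) / mss;
                if (init_rwnd < 2)
                        init_rwnd = 2;
        }
        return init_rwnd;
}

int main(void)
{
        printf("%u\n", default_init_rwnd(20, 1460));    /* 20 segments */
        printf("%u\n", default_init_rwnd(20, 4000));    /* 7 */
        printf("%u\n", default_init_rwnd(20, 9000));    /* 3 */
        printf("%u\n", default_init_rwnd(20, 65000));   /* 2: clamped floor */
        return 0;
}
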
@@@ -1950,12 -1950,14 +1950,14 @@@ static int tcp_mtu_probe(struct sock *s
        len = 0;
        tcp_for_write_queue_from_safe(skb, next, sk) {
                copy = min_t(int, skb->len, probe_size - len);
-               if (nskb->ip_summed)
+               if (nskb->ip_summed) {
                        skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
-               else
-                       nskb->csum = skb_copy_and_csum_bits(skb, 0,
-                                                           skb_put(nskb, copy),
-                                                           copy, nskb->csum);
+               } else {
+                       __wsum csum = skb_copy_and_csum_bits(skb, 0,
+                                                            skb_put(nskb, copy),
+                                                            copy, 0);
+                       nskb->csum = csum_block_add(nskb->csum, csum, len);
+               }
  
                if (skb->len <= copy) {
                        /* We've eaten all the data from this skb.
@@@ -2569,7 -2571,8 +2571,8 @@@ int __tcp_retransmit_skb(struct sock *s
         * copying overhead: fragmentation, tunneling, mangling etc.
         */
        if (atomic_read(&sk->sk_wmem_alloc) >
-           min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
+           min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
+                 sk->sk_sndbuf))
                return -EAGAIN;
  
        if (skb_still_in_host_queue(sk, skb))
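
/*
 * Why the tcp_mtu_probe() hunk above switches to csum_block_add(): one's-
 * complement sums of two blocks only concatenate directly when the first
 * block has even length; at an odd offset the second sum must be
 * byte-swapped first, and csum_block_add() does that bookkeeping using the
 * running offset (len). A standalone demonstration with simplified
 * fold-to-16-bit helpers (not the kernel's implementations):
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t csum(const uint8_t *p, size_t len, uint32_t sum)
{
        size_t i;

        for (i = 0; i + 1 < len; i += 2)
                sum += (uint32_t)p[i] << 8 | p[i + 1];
        if (len & 1)
                sum += (uint32_t)p[len - 1] << 8;       /* pad odd tail */
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);     /* fold carries */
        return (uint16_t)sum;
}

static uint16_t block_add(uint32_t sum, uint16_t sum2, size_t offset)
{
        if (offset & 1)                 /* realign a block at an odd offset */
                sum2 = (uint16_t)(sum2 << 8 | sum2 >> 8);
        sum += sum2;
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

int main(void)
{
        uint8_t data[] = { 1, 2, 3, 4, 5, 6, 7 };
        size_t split = 3;               /* odd-length first block */
        uint16_t whole = csum(data, sizeof(data), 0);
        uint16_t parts = block_add(csum(data, split, 0),
                                   csum(data + split, sizeof(data) - split, 0),
                                   split);

        printf("whole=%04x parts=%04x\n", whole, parts); /* both 100c */
        return 0;
}
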
diff --combined net/ipv4/udp.c
index defc9cad1797eb696653784fb1560652eb23d8d0,e9513e397c4f30db956d1c92af1ec0d77c1530a6..381a035fcfa1ca57e28ddbd33a94f5ab6344112d
@@@ -1025,8 -1025,7 +1025,8 @@@ int udp_sendmsg(struct sock *sk, struc
                flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
                                   RT_SCOPE_UNIVERSE, sk->sk_protocol,
                                   flow_flags,
 -                                 faddr, saddr, dport, inet->inet_sport);
 +                                 faddr, saddr, dport, inet->inet_sport,
 +                                 sock_i_uid(sk));
  
                if (!saddr && ipc.oif) {
                        err = l3mdev_get_saddr(net, ipc.oif, fl4);
@@@ -1343,7 -1342,7 +1343,7 @@@ try_again
                *addr_len = sizeof(*sin);
        }
        if (inet->cmsg_flags)
-               ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr));
+               ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr), off);
  
        err = copied;
        if (flags & MSG_TRUNC)
@@@ -2265,20 -2264,6 +2265,20 @@@ unsigned int udp_poll(struct file *file
  }
  EXPORT_SYMBOL(udp_poll);
  
 +int udp_abort(struct sock *sk, int err)
 +{
 +      lock_sock(sk);
 +
 +      sk->sk_err = err;
 +      sk->sk_error_report(sk);
 +      udp_disconnect(sk, 0);
 +
 +      release_sock(sk);
 +
 +      return 0;
 +}
 +EXPORT_SYMBOL_GPL(udp_abort);
 +
  struct proto udp_prot = {
        .name              = "UDP",
        .owner             = THIS_MODULE,
        .compat_getsockopt = compat_udp_getsockopt,
  #endif
        .clear_sk          = sk_prot_clear_portaddr_nulls,
 +      .diag_destroy      = udp_abort,
  };
  EXPORT_SYMBOL(udp_prot);
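
/*
 * udp_abort() above is wired into struct proto as ->diag_destroy, the hook
 * behind the inet_diag SOCK_DESTROY operation: it sets an error on the
 * socket, wakes any blocked reader via sk_error_report(), and disconnects.
 * Schematic caller, assuming only that the hook may be absent for other
 * protocols (this is not the exact sock_diag code):
 */
#include <linux/errno.h>
#include <net/sock.h>

static int example_sock_destroy(struct sock *sk)
{
        if (!sk->sk_prot->diag_destroy)
                return -EOPNOTSUPP;
        return sk->sk_prot->diag_destroy(sk, ECONNABORTED);
}
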
  
diff --combined net/ipv6/addrconf.c
index 563a91f15f68e706362a403364f5b13b9f3b3fc6,cb8bb5988c03fc7849eb5fbfda8b8bb9d55a7d34..1e541578a66d3eb63d864884156903909c4990e0
@@@ -205,7 -205,6 +205,7 @@@ static struct ipv6_devconf ipv6_devcon
        .accept_ra_rt_info_max_plen = 0,
  #endif
  #endif
 +      .accept_ra_rt_table     = 0,
        .proxy_ndp              = 0,
        .accept_source_route    = 0,    /* we do not accept RH0 by default. */
        .disable_ipv6           = 0,
@@@ -250,7 -249,6 +250,7 @@@ static struct ipv6_devconf ipv6_devconf
        .accept_ra_rt_info_max_plen = 0,
  #endif
  #endif
 +      .accept_ra_rt_table     = 0,
        .proxy_ndp              = 0,
        .accept_source_route    = 0,    /* we do not accept RH0 by default. */
        .disable_ipv6           = 0,
@@@ -2148,31 -2146,6 +2148,31 @@@ static void  __ipv6_try_regen_rndid(str
                __ipv6_regen_rndid(idev);
  }
  
  +u32 addrconf_rt_table(const struct net_device *dev, u32 default_table)
  +{
 +      /* Determines into what table to put autoconf PIO/RIO/default routes
 +       * learned on this device.
 +       *
 +       * - If 0, use the same table for every device. This puts routes into
 +       *   one of RT_TABLE_{PREFIX,INFO,DFLT} depending on the type of route
 +       *   (but note that these three are currently all equal to
 +       *   RT6_TABLE_MAIN).
 +       * - If > 0, use the specified table.
 +       * - If < 0, put routes into table dev->ifindex + (-rt_table).
 +       */
 +      struct inet6_dev *idev = in6_dev_get(dev);
 +      u32 table;
 +      int sysctl = idev->cnf.accept_ra_rt_table;
 +      if (sysctl == 0) {
 +              table = default_table;
 +      } else if (sysctl > 0) {
 +              table = (u32) sysctl;
 +      } else {
 +              table = (unsigned) dev->ifindex + (-sysctl);
 +      }
 +      in6_dev_put(idev);
 +      return table;
 +}
 +
  /*
   *    Add prefix route.
   */
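
/*
 * The accept_ra_rt_table mapping above, evaluated for a device with
 * ifindex 5 as a standalone arithmetic check of the three cases the
 * function comment describes (254 is RT_TABLE_MAIN):
 */
#include <stdio.h>

static unsigned int rt_table(int sysctl, int ifindex, unsigned int default_table)
{
        if (sysctl == 0)
                return default_table;           /* shared default tables */
        if (sysctl > 0)
                return (unsigned int)sysctl;    /* one fixed table */
        return (unsigned int)ifindex + (unsigned int)(-sysctl); /* per-device */
}

int main(void)
{
        printf("%u\n", rt_table(0, 5, 254));     /* 254 */
        printf("%u\n", rt_table(100, 5, 254));   /* 100 */
        printf("%u\n", rt_table(-1000, 5, 254)); /* 1005 */
        return 0;
}
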
@@@ -2182,7 -2155,7 +2182,7 @@@ addrconf_prefix_route(struct in6_addr *
                      unsigned long expires, u32 flags)
  {
        struct fib6_config cfg = {
 -              .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX,
 +              .fc_table = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_PREFIX),
                .fc_metric = IP6_RT_PRIO_ADDRCONF,
                .fc_ifindex = dev->ifindex,
                .fc_expires = expires,
@@@ -2215,7 -2188,7 +2215,7 @@@ static struct rt6_info *addrconf_get_pr
        struct fib6_node *fn;
        struct rt6_info *rt = NULL;
        struct fib6_table *table;
 -      u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX;
 +      u32 tb_id = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_PREFIX);
  
        table = fib6_get_table(dev_net(dev), tb_id);
        if (!table)
@@@ -2943,7 -2916,7 +2943,7 @@@ static void init_loopback(struct net_de
                                 * lo device down, release this obsolete dst and
                                 * reallocate a new router for ifa.
                                 */
-                               if (sp_ifa->rt->dst.obsolete > 0) {
+                               if (!atomic_read(&sp_ifa->rt->rt6i_ref)) {
                                        ip6_rt_put(sp_ifa->rt);
                                        sp_ifa->rt = NULL;
                                } else {
@@@ -4692,7 -4665,6 +4692,7 @@@ static inline void ipv6_store_devconf(s
        array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
  #endif
  #endif
 +      array[DEVCONF_ACCEPT_RA_RT_TABLE] = cnf->accept_ra_rt_table;
        array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
        array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
  #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
@@@ -5659,13 -5631,6 +5659,13 @@@ static struct addrconf_sysctl_tabl
                },
  #endif
  #endif
 +              {
 +                      .procname       = "accept_ra_rt_table",
 +                      .data           = &ipv6_devconf.accept_ra_rt_table,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec,
 +              },
                {
                        .procname       = "proxy_ndp",
                        .data           = &ipv6_devconf.proxy_ndp,
diff --combined net/ipv6/route.c
index aed4f305f5f60d33097f2421dbb515966d3b43f7,dbffc9de184b1760752ccd62d915fb0273eed99b..46476a3af2ad68682c964060004ace0d7097b20b
@@@ -99,12 -99,13 +99,12 @@@ static void                rt6_dst_from_metrics_check
  static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
  
  #ifdef CONFIG_IPV6_ROUTE_INFO
 -static struct rt6_info *rt6_add_route_info(struct net *net,
 +static struct rt6_info *rt6_add_route_info(struct net_device *dev,
                                           const struct in6_addr *prefix, int prefixlen,
 -                                         const struct in6_addr *gwaddr, int ifindex,
 -                                         unsigned int pref);
 -static struct rt6_info *rt6_get_route_info(struct net *net,
 +                                         const struct in6_addr *gwaddr, unsigned int pref);
 +static struct rt6_info *rt6_get_route_info(struct net_device *dev,
                                           const struct in6_addr *prefix, int prefixlen,
 -                                         const struct in6_addr *gwaddr, int ifindex);
 +                                         const struct in6_addr *gwaddr);
  #endif
  
  struct uncached_list {
@@@ -754,6 -755,7 +754,6 @@@ static bool rt6_is_gw_or_nonexthop(cons
  int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
                  const struct in6_addr *gwaddr)
  {
 -      struct net *net = dev_net(dev);
        struct route_info *rinfo = (struct route_info *) opt;
        struct in6_addr prefix_buf, *prefix;
        unsigned int pref;
        if (rinfo->prefix_len == 0)
                rt = rt6_get_dflt_router(gwaddr, dev);
        else
 -              rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
 -                                      gwaddr, dev->ifindex);
 +              rt = rt6_get_route_info(dev, prefix, rinfo->prefix_len, gwaddr);
  
        if (rt && !lifetime) {
                ip6_del_rt(rt);
        }
  
        if (!rt && lifetime)
 -              rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
 -                                      pref);
 +              rt = rt6_add_route_info(dev, prefix, rinfo->prefix_len, gwaddr, pref);
        else if (rt)
                rt->rt6i_flags = RTF_ROUTEINFO |
                                 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
@@@ -1382,7 -1386,7 +1382,7 @@@ static void ip6_rt_update_pmtu(struct d
  }
  
  void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
 -                   int oif, u32 mark)
 +                   int oif, u32 mark, kuid_t uid)
  {
        const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
        struct dst_entry *dst;
        fl6.daddr = iph->daddr;
        fl6.saddr = iph->saddr;
        fl6.flowlabel = ip6_flowinfo(iph);
 +      fl6.flowi6_uid = uid;
  
        dst = ip6_route_output(net, NULL, &fl6);
        if (!dst->error)
@@@ -1406,7 -1409,7 +1406,7 @@@ EXPORT_SYMBOL_GPL(ip6_update_pmtu)
  void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
  {
        ip6_update_pmtu(skb, sock_net(sk), mtu,
 -                      sk->sk_bound_dev_if, sk->sk_mark);
 +                      sk->sk_bound_dev_if, sk->sk_mark, sock_i_uid(sk));
  }
  EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
  
@@@ -2245,16 -2248,15 +2245,16 @@@ static void ip6_rt_copy_init(struct rt6
  }
  
  #ifdef CONFIG_IPV6_ROUTE_INFO
 -static struct rt6_info *rt6_get_route_info(struct net *net,
 +static struct rt6_info *rt6_get_route_info(struct net_device *dev,
                                           const struct in6_addr *prefix, int prefixlen,
 -                                         const struct in6_addr *gwaddr, int ifindex)
 +                                         const struct in6_addr *gwaddr)
  {
        struct fib6_node *fn;
        struct rt6_info *rt = NULL;
        struct fib6_table *table;
  
 -      table = fib6_get_table(net, RT6_TABLE_INFO);
 +      table = fib6_get_table(dev_net(dev),
 +                             addrconf_rt_table(dev, RT6_TABLE_INFO));
        if (!table)
                return NULL;
  
                goto out;
  
        for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
 -              if (rt->dst.dev->ifindex != ifindex)
 +              if (rt->dst.dev->ifindex != dev->ifindex)
                        continue;
                if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
                        continue;
        return rt;
  }
  
 -static struct rt6_info *rt6_add_route_info(struct net *net,
 +static struct rt6_info *rt6_add_route_info(struct net_device *dev,
                                           const struct in6_addr *prefix, int prefixlen,
 -                                         const struct in6_addr *gwaddr, int ifindex,
 -                                         unsigned int pref)
 +                                         const struct in6_addr *gwaddr, unsigned int pref)
  {
        struct fib6_config cfg = {
                .fc_metric      = IP6_RT_PRIO_USER,
 -              .fc_ifindex     = ifindex,
 +              .fc_ifindex     = dev->ifindex,
                .fc_dst_len     = prefixlen,
                .fc_flags       = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
                                  RTF_UP | RTF_PREF(pref),
                .fc_nlinfo.portid = 0,
                .fc_nlinfo.nlh = NULL,
 -              .fc_nlinfo.nl_net = net,
 +              .fc_nlinfo.nl_net = dev_net(dev),
        };
  
 -      cfg.fc_table = l3mdev_fib_table_by_index(net, ifindex) ? : RT6_TABLE_INFO;
  +      cfg.fc_table = l3mdev_fib_table_by_index(dev_net(dev), dev->ifindex) ? :
  +                     addrconf_rt_table(dev, RT6_TABLE_INFO);
        cfg.fc_dst = *prefix;
        cfg.fc_gateway = *gwaddr;
  
  
        ip6_route_add(&cfg);
  
 -      return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
 +      return rt6_get_route_info(dev, prefix, prefixlen, gwaddr);
  }
  #endif
  
@@@ -2312,8 -2315,7 +2312,8 @@@ struct rt6_info *rt6_get_dflt_router(co
        struct rt6_info *rt;
        struct fib6_table *table;
  
 -      table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
 +      table = fib6_get_table(dev_net(dev),
 +                             addrconf_rt_table(dev, RT6_TABLE_MAIN));
        if (!table)
                return NULL;
  
@@@ -2335,7 -2337,7 +2335,7 @@@ struct rt6_info *rt6_add_dflt_router(co
                                     unsigned int pref)
  {
        struct fib6_config cfg = {
 -              .fc_table       = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
 +              .fc_table       = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_DFLT),
                .fc_metric      = IP6_RT_PRIO_USER,
                .fc_ifindex     = dev->ifindex,
                .fc_flags       = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
        return rt6_get_dflt_router(gwaddr, dev);
  }
  
 -void rt6_purge_dflt_routers(struct net *net)
 -{
 -      struct rt6_info *rt;
 -      struct fib6_table *table;
  
 -      /* NOTE: Keep consistent with rt6_get_dflt_router */
 -      table = fib6_get_table(net, RT6_TABLE_DFLT);
 -      if (!table)
 -              return;
  +int rt6_addrconf_purge(struct rt6_info *rt, void *arg)
  +{
 +      if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
 +          (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2))
 +              return -1;
 +      return 0;
 +}
  
 -restart:
 -      read_lock_bh(&table->tb6_lock);
 -      for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
 -              if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
 -                  (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
 -                      dst_hold(&rt->dst);
 -                      read_unlock_bh(&table->tb6_lock);
 -                      ip6_del_rt(rt);
 -                      goto restart;
 -              }
 -      }
 -      read_unlock_bh(&table->tb6_lock);
 +void rt6_purge_dflt_routers(struct net *net)
 +{
 +      fib6_clean_all(net, rt6_addrconf_purge, NULL);
  }
  
  static void rtmsg_to_fib6_config(struct net *net,
@@@ -2687,7 -2700,6 +2687,7 @@@ static const struct nla_policy rtm_ipv6
        [RTA_PREF]              = { .type = NLA_U8 },
        [RTA_ENCAP_TYPE]        = { .type = NLA_U16 },
        [RTA_ENCAP]             = { .type = NLA_NESTED },
 +      [RTA_UID]               = { .type = NLA_U32 },
  };
  
  static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
@@@ -3128,7 -3140,9 +3128,9 @@@ static int rt6_fill_node(struct net *ne
        if (iif) {
  #ifdef CONFIG_IPV6_MROUTE
                if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
-                       int err = ip6mr_get_route(net, skb, rtm, nowait);
+                       int err = ip6mr_get_route(net, skb, rtm, nowait,
+                                                 portid);
                        if (err <= 0) {
                                if (!nowait) {
                                        if (err == 0)
@@@ -3249,11 -3263,6 +3251,11 @@@ static int inet6_rtm_getroute(struct sk
        if (tb[RTA_MARK])
                fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
  
 +      if (tb[RTA_UID])
 +              fl6.flowi6_uid = make_kuid(current_user_ns(),
 +                                         nla_get_u32(tb[RTA_UID]));
 +      else
 +              fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
        if (iif) {
                struct net_device *dev;
                int flags = 0;
diff --combined net/ipv6/tcp_ipv6.c
index f58632cc45dc1e2367f2f1f7131a7e215f0d9dda,fbd521fdae53606bb6db7dc729ed788b3abd82f0..b831f3eb55a40f9de32a3cf30ba34e9dd853ec23
@@@ -234,7 -234,6 +234,7 @@@ static int tcp_v6_connect(struct sock *
        fl6.flowi6_mark = sk->sk_mark;
        fl6.fl6_dport = usin->sin6_port;
        fl6.fl6_sport = inet->inet_sport;
 +      fl6.flowi6_uid = sock_i_uid(sk);
  
        opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
        final_p = fl6_update_dst(&fl6, opt, &final);
        return NULL;
  }
  
+ static void tcp_v6_restore_cb(struct sk_buff *skb)
+ {
+       /* We need to move header back to the beginning if xfrm6_policy_check()
+        * and tcp_v6_fill_cb() are going to be called again.
+        * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
+        */
+       memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+               sizeof(struct inet6_skb_parm));
+ }
 +
  /* The socket must have its spinlock held when we get
   * here, unless it is a TCP_LISTEN socket.
   *
@@@ -1309,6 -1318,7 +1319,7 @@@ ipv6_pktoptions
                        np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
                if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
                        skb_set_owner_r(opt_skb, sk);
+                       tcp_v6_restore_cb(opt_skb);
                        opt_skb = xchg(&np->pktoptions, opt_skb);
                } else {
                        __kfree_skb(opt_skb);
@@@ -1342,15 -1352,6 +1353,6 @@@ static void tcp_v6_fill_cb(struct sk_bu
        TCP_SKB_CB(skb)->sacked = 0;
  }
  
- static void tcp_v6_restore_cb(struct sk_buff *skb)
- {
-       /* We need to move header back to the beginning if xfrm6_policy_check()
-        * and tcp_v6_fill_cb() are going to be called again.
-        */
-       memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
-               sizeof(struct inet6_skb_parm));
- }
  static int tcp_v6_rcv(struct sk_buff *skb)
  {
        const struct tcphdr *th;
@@@ -1901,7 -1902,6 +1903,7 @@@ struct proto tcpv6_prot = 
        .proto_cgroup           = tcp_proto_cgroup,
  #endif
        .clear_sk               = tcp_v6_clear_sk,
 +      .diag_destroy           = tcp_abort,
  };
  
  static const struct inet6_protocol tcpv6_protocol = {
diff --combined net/ipv6/udp.c
index 2415e55aaf8f590825f95cb236a3c1a496f4dbc2,dfa85e7264dfd54d38ccddb0920e09e789ef7643..1207379c1cce3a214afef4c2a39230360aa4bdc7
@@@ -498,7 -498,8 +498,8 @@@ try_again
  
        if (is_udp4) {
                if (inet->cmsg_flags)
-                       ip_cmsg_recv(msg, skb);
+                       ip_cmsg_recv_offset(msg, skb,
+                                           sizeof(struct udphdr), off);
        } else {
                if (np->rxopt.all)
                        ip6_datagram_recv_specific_ctl(sk, msg, skb);
@@@ -1243,7 -1244,6 +1244,7 @@@ do_udp_sendmsg
                fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
  
        fl6.flowi6_mark = sk->sk_mark;
 +      fl6.flowi6_uid = sock_i_uid(sk);
  
        if (msg->msg_controllen) {
                opt = &opt_space;
@@@ -1552,7 -1552,6 +1553,7 @@@ struct proto udpv6_prot = 
        .compat_getsockopt = compat_udpv6_getsockopt,
  #endif
        .clear_sk          = udp_v6_clear_sk,
 +      .diag_destroy      = udp_abort,
  };
  
  static struct inet_protosw udpv6_protosw = {