Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
author    Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 14 Nov 2014 22:24:33 +0000 (14:24 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 14 Nov 2014 22:24:33 +0000 (14:24 -0800)
Pull arm64 fixes from Catalin Marinas:

 - fix EFI stub cache maintenance causing aborts during boot on certain
   platforms

 - handle byte stores in __clear_user without panicking

 - fix race condition in aarch64_insn_patch_text_sync() (instruction
   patching)

 - Couple of type fixes

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: ARCH_PFN_OFFSET should be unsigned long
  Correct the race condition in aarch64_insn_patch_text_sync()
  arm64: __clear_user: handle exceptions on strb
  arm64: Fix data type for physical address
  arm64: efi: Fix stub cache maintenance

181 files changed:
Documentation/devicetree/bindings/ata/sata_rcar.txt
Documentation/networking/ip-sysctl.txt
MAINTAINERS
arch/arm64/boot/dts/apm-storm.dtsi
arch/x86/kvm/emulate.c
arch/xtensa/Kconfig
arch/xtensa/boot/dts/lx200mx.dts [new file with mode: 0644]
arch/xtensa/configs/generic_kc705_defconfig [new file with mode: 0644]
arch/xtensa/configs/smp_lx200_defconfig [new file with mode: 0644]
arch/xtensa/include/asm/pgtable.h
arch/xtensa/include/uapi/asm/unistd.h
block/blk-merge.c
block/blk-mq.c
block/ioprio.c
block/scsi_ioctl.c
drivers/acpi/blacklist.c
drivers/ata/ahci.c
drivers/ata/libahci.c
drivers/ata/sata_rcar.c
drivers/base/power/domain.c
drivers/block/zram/zram_drv.c
drivers/char/hw_random/pseries-rng.c
drivers/char/virtio_console.c
drivers/cpufreq/cpufreq-dt.c
drivers/cpufreq/cpufreq.c
drivers/crypto/caam/key_gen.c
drivers/crypto/qat/qat_common/adf_accel_devices.h
drivers/crypto/qat/qat_common/adf_transport.c
drivers/crypto/qat/qat_common/qat_algs.c
drivers/crypto/qat/qat_common/qat_crypto.c
drivers/crypto/qat/qat_dh895xcc/adf_admin.c
drivers/crypto/qat/qat_dh895xcc/adf_drv.c
drivers/crypto/qat/qat_dh895xcc/adf_isr.c
drivers/firewire/core-cdev.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/cik_sdma.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r600_dma.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rs690.c
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/radeon/si.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/usbhid/hid-quirks.c
drivers/hwmon/fam15h_power.c
drivers/hwmon/ibmpowernv.c
drivers/hwmon/pwm-fan.c
drivers/md/dm-bufio.c
drivers/md/dm-raid.c
drivers/md/dm-stripe.c
drivers/md/dm-thin.c
drivers/md/persistent-data/dm-btree-internal.h
drivers/md/persistent-data/dm-btree-spine.c
drivers/md/persistent-data/dm-btree.c
drivers/mfd/max77693.c
drivers/mfd/rtsx_pcr.c
drivers/mfd/stmpe.h
drivers/mfd/twl4030-power.c
drivers/mfd/viperboard.c
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.h
drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/mvpp2.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qualcomm/Kconfig
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/smsc/smc91x.c
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/sun/sunhme.c
drivers/net/ethernet/ti/cpsw_ale.c
drivers/net/ethernet/ti/cpts.c
drivers/net/macvtap.c
drivers/net/phy/dp83640.c
drivers/net/phy/phy.c
drivers/net/ppp/ppp_generic.c
drivers/net/tun.c
drivers/net/usb/asix_devices.c
drivers/net/vxlan.c
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/mac80211_hwsim.c
drivers/platform/x86/Kconfig
drivers/platform/x86/hp_accel.c
drivers/s390/kvm/virtio_ccw.c
drivers/thermal/imx_thermal.c
drivers/thermal/int340x_thermal/int3403_thermal.c
drivers/thermal/samsung/exynos_tmu_data.c
drivers/thermal/samsung/exynos_tmu_data.h
fs/ceph/caps.c
fs/notify/fsnotify.c
fs/notify/fsnotify.h
fs/notify/inode_mark.c
fs/notify/mark.c
fs/notify/vfsmount_mark.c
include/linux/bootmem.h
include/linux/mfd/max77693-private.h
include/linux/mmzone.h
include/linux/page-isolation.h
include/linux/pm_domain.h
include/linux/ring_buffer.h
include/linux/socket.h
include/net/9p/transport.h
include/net/udp_tunnel.h
include/uapi/linux/Kbuild
include/uapi/linux/if_bridge.h
init/main.c
kernel/audit.c
kernel/audit_tree.c
kernel/panic.c
kernel/power/suspend.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
lib/rhashtable.c
mm/bootmem.c
mm/compaction.c
mm/internal.h
mm/iov_iter.c
mm/memory_hotplug.c
mm/nobootmem.c
mm/page_alloc.c
mm/page_isolation.c
mm/slab_common.c
net/bridge/netfilter/nft_reject_bridge.c
net/ceph/crypto.c
net/ceph/osd_client.c
net/dsa/slave.c
net/ipv4/fou.c
net/ipv4/geneve.c
net/ipv4/ip_sockglue.c
net/ipv4/tcp_input.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/sit.c
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/mesh.c
net/mac80211/mlme.c
net/mac80211/rx.c
net/mac80211/spectmgmt.c
net/netlink/af_netlink.c
net/sctp/auth.c
net/sctp/sm_make_chunk.c
security/selinux/hooks.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/usb/mixer_quirks.c
tools/testing/selftests/net/psock_fanout.c

index 1e6111333fa88f86a2008325637195f51d50648d..80ae87a0784bdba8f2014d51ec6b083512c1dc4f 100644 (file)
@@ -3,8 +3,10 @@
 Required properties:
 - compatible           : should contain one of the following:
                          - "renesas,sata-r8a7779" for R-Car H1
-                         - "renesas,sata-r8a7790" for R-Car H2
-                         - "renesas,sata-r8a7791" for R-Car M2
+                         - "renesas,sata-r8a7790-es1" for R-Car H2 ES1
+                         - "renesas,sata-r8a7790" for R-Car H2 other than ES1
+                         - "renesas,sata-r8a7791" for R-Car M2-W
+                         - "renesas,sata-r8a7793" for R-Car M2-N
 - reg                  : address and length of the SATA registers;
 - interrupts           : must consist of one interrupt specifier.
 
index 0307e2875f2159cb669b741f9d6a949618c3a055..a476b08a43e0dac5642a0bdfba76b8523be1033e 100644 (file)
@@ -56,6 +56,13 @@ ip_forward_use_pmtu - BOOLEAN
        0 - disabled
        1 - enabled
 
+fwmark_reflect - BOOLEAN
+       Controls the fwmark of kernel-generated IPv4 reply packets that are not
+       associated with a socket (for example, TCP RSTs or ICMP echo replies).
+       If unset, these packets have a fwmark of zero. If set, they have the
+       fwmark of the packet they are replying to.
+       Default: 0
+
 route/max_size - INTEGER
        Maximum number of routes allowed in the kernel.  Increase
        this when using large numbers of interfaces and/or routes.
@@ -1201,6 +1208,13 @@ conf/all/forwarding - BOOLEAN
 proxy_ndp - BOOLEAN
        Do proxy ndp.
 
+fwmark_reflect - BOOLEAN
+       Controls the fwmark of kernel-generated IPv6 reply packets that are not
+       associated with a socket (for example, TCP RSTs or ICMPv6 echo replies).
+       If unset, these packets have a fwmark of zero. If set, they have the
+       fwmark of the packet they are replying to.
+       Default: 0
+
 conf/interface/*:
        Change special settings per interface.
 
index ea4d0058fd1b68b7d5e11f5228a4867aa0b96eea..60b1163dba28a4dc880eeda43a1596d37081d469 100644 (file)
@@ -4716,6 +4716,7 @@ L:        linux-iio@vger.kernel.org
 S:     Maintained
 F:     drivers/iio/
 F:     drivers/staging/iio/
+F:     include/linux/iio/
 
 IKANOS/ADI EAGLE ADSL USB DRIVER
 M:     Matthieu Castet <castet.matthieu@free.fr>
index 295c72d52a1f7206f00a29305328f4f19a5f0705..f1ad9c2ab2e917197f3a3a00d7b8b8499138e933 100644 (file)
                        compatible = "apm,xgene-enet";
                        status = "disabled";
                        reg = <0x0 0x17020000 0x0 0xd100>,
-                             <0x0 0X17030000 0x0 0X400>,
+                             <0x0 0X17030000 0x0 0Xc300>,
                              <0x0 0X10000000 0x0 0X200>;
                        reg-names = "enet_csr", "ring_csr", "ring_cmd";
                        interrupts = <0x0 0x3c 0x4>;
                sgenet0: ethernet@1f210000 {
                        compatible = "apm,xgene-enet";
                        status = "disabled";
-                       reg = <0x0 0x1f210000 0x0 0x10000>,
-                             <0x0 0x1f200000 0x0 0X10000>,
-                             <0x0 0x1B000000 0x0 0X20000>;
+                       reg = <0x0 0x1f210000 0x0 0xd100>,
+                             <0x0 0x1f200000 0x0 0Xc300>,
+                             <0x0 0x1B000000 0x0 0X200>;
                        reg-names = "enet_csr", "ring_csr", "ring_cmd";
                        interrupts = <0x0 0xA0 0x4>;
                        dma-coherent;
                        compatible = "apm,xgene-enet";
                        status = "disabled";
                        reg = <0x0 0x1f610000 0x0 0xd100>,
-                             <0x0 0x1f600000 0x0 0X400>,
+                             <0x0 0x1f600000 0x0 0Xc300>,
                              <0x0 0x18000000 0x0 0X200>;
                        reg-names = "enet_csr", "ring_csr", "ring_cmd";
                        interrupts = <0x0 0x60 0x4>;
index 5edf088ca51e11789d6d26e3bfbdad16c1104220..9f8a2faf50407b6212f71a5de81aea8680855f87 100644 (file)
@@ -4287,6 +4287,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
                fetch_register_operand(op);
                break;
        case OpCL:
+               op->type = OP_IMM;
                op->bytes = 1;
                op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
                break;
@@ -4294,6 +4295,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
                rc = decode_imm(ctxt, op, 1, true);
                break;
        case OpOne:
+               op->type = OP_IMM;
                op->bytes = 1;
                op->val = 1;
                break;
@@ -4352,21 +4354,27 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
                ctxt->memop.bytes = ctxt->op_bytes + 2;
                goto mem_common;
        case OpES:
+               op->type = OP_IMM;
                op->val = VCPU_SREG_ES;
                break;
        case OpCS:
+               op->type = OP_IMM;
                op->val = VCPU_SREG_CS;
                break;
        case OpSS:
+               op->type = OP_IMM;
                op->val = VCPU_SREG_SS;
                break;
        case OpDS:
+               op->type = OP_IMM;
                op->val = VCPU_SREG_DS;
                break;
        case OpFS:
+               op->type = OP_IMM;
                op->val = VCPU_SREG_FS;
                break;
        case OpGS:
+               op->type = OP_IMM;
                op->val = VCPU_SREG_GS;
                break;
        case OpImplicit:
index 49c6c3d9444916e0dc727d51c96349e92c424d6a..81f57e8c8f1be91f6cffea3b157b6bead7a541e1 100644 (file)
@@ -319,8 +319,8 @@ config XTENSA_PLATFORM_S6105
 
 config XTENSA_PLATFORM_XTFPGA
        bool "XTFPGA"
+       select ETHOC if ETHERNET
        select SERIAL_CONSOLE
-       select ETHOC
        select XTENSA_CALIBRATE_CCOUNT
        help
          XTFPGA is the name of Tensilica board family (LX60, LX110, LX200, ML605).
@@ -367,7 +367,7 @@ config BUILTIN_DTB
 config BLK_DEV_SIMDISK
        tristate "Host file-based simulated block device support"
        default n
-       depends on XTENSA_PLATFORM_ISS
+       depends on XTENSA_PLATFORM_ISS && BLOCK
        help
          Create block devices that map to files in the host file system.
          Device binding to host file may be changed at runtime via proc
diff --git a/arch/xtensa/boot/dts/lx200mx.dts b/arch/xtensa/boot/dts/lx200mx.dts
new file mode 100644 (file)
index 0000000..249822b
--- /dev/null
@@ -0,0 +1,16 @@
+/dts-v1/;
+/include/ "xtfpga.dtsi"
+/include/ "xtfpga-flash-16m.dtsi"
+
+/ {
+       compatible = "cdns,xtensa-lx200";
+       memory@0 {
+               device_type = "memory";
+               reg = <0x00000000 0x06000000>;
+       };
+       pic: pic {
+               compatible = "cdns,xtensa-mx";
+               #interrupt-cells = <2>;
+               interrupt-controller;
+       };
+};
diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig
new file mode 100644 (file)
index 0000000..f4b7b38
--- /dev/null
@@ -0,0 +1,131 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_MEMCG=y
+CONFIG_NAMESPACES=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_XTENSA_VARIANT_DC233C=y
+CONFIG_XTENSA_UNALIGNED_USER=y
+CONFIG_PREEMPT=y
+CONFIG_HIGHMEM=y
+# CONFIG_PCI is not set
+CONFIG_XTENSA_PLATFORM_XTFPGA=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug"
+CONFIG_USE_OF=y
+CONFIG_BUILTIN_DTB="kc705"
+# CONFIG_COMPACTION is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+CONFIG_MTD=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_MARVELL_PHY=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=y
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_UBIFS_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
+CONFIG_SUNRPC_DEBUG=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_STACKTRACE=y
+CONFIG_RCU_TRACE=y
+# CONFIG_FTRACE is not set
+CONFIG_LD_NO_RELAX=y
+# CONFIG_S32C1I_SELFTEST is not set
+CONFIG_CRYPTO_ANSI_CPRNG=y
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
new file mode 100644 (file)
index 0000000..22eeacb
--- /dev/null
@@ -0,0 +1,135 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_MEMCG=y
+CONFIG_NAMESPACES=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_XTENSA_VARIANT_CUSTOM=y
+CONFIG_XTENSA_VARIANT_CUSTOM_NAME="test_mmuhifi_c3"
+CONFIG_XTENSA_UNALIGNED_USER=y
+CONFIG_PREEMPT=y
+CONFIG_HAVE_SMP=y
+CONFIG_SMP=y
+CONFIG_HOTPLUG_CPU=y
+# CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set
+# CONFIG_PCI is not set
+CONFIG_XTENSA_PLATFORM_XTFPGA=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug"
+CONFIG_USE_OF=y
+CONFIG_BUILTIN_DTB="lx200mx"
+# CONFIG_COMPACTION is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+CONFIG_MTD=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_MARVELL_PHY=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=y
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_UBIFS_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
+CONFIG_SUNRPC_DEBUG=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_VM=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_STACKTRACE=y
+CONFIG_RCU_TRACE=y
+# CONFIG_FTRACE is not set
+CONFIG_LD_NO_RELAX=y
+# CONFIG_S32C1I_SELFTEST is not set
+CONFIG_CRYPTO_ANSI_CPRNG=y
index b2173e5da601cbe57303f9060faacb0dde0310cf..0383aed5912111b6a0b2cd4d0c58f758a95b4a7e 100644 (file)
@@ -277,6 +277,8 @@ static inline pte_t pte_mkwrite(pte_t pte)
 static inline pte_t pte_mkspecial(pte_t pte)
        { return pte; }
 
+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CA_MASK))
+
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
index 8883fc877c5c93334cacfd29ffe6a12300bb0961..db5bb72e2f4eda85f6ad5fe127be9796c8862ad9 100644 (file)
@@ -384,7 +384,8 @@ __SYSCALL(174, sys_chroot, 1)
 #define __NR_pivot_root                        175
 __SYSCALL(175, sys_pivot_root, 2)
 #define __NR_umount                            176
-__SYSCALL(176, sys_umount, 2)
+__SYSCALL(176, sys_oldumount, 1)
+#define __ARCH_WANT_SYS_OLDUMOUNT
 #define __NR_swapoff                           177
 __SYSCALL(177, sys_swapoff, 1)
 #define __NR_sync                              178
@@ -742,7 +743,14 @@ __SYSCALL(335, sys_sched_getattr, 3)
 #define __NR_renameat2                         336
 __SYSCALL(336, sys_renameat2, 5)
 
-#define __NR_syscall_count                     337
+#define __NR_seccomp                           337
+__SYSCALL(337, sys_seccomp, 3)
+#define __NR_getrandom                         338
+__SYSCALL(338, sys_getrandom, 3)
+#define __NR_memfd_create                      339
+__SYSCALL(339, sys_memfd_create, 2)
+
+#define __NR_syscall_count                     340
 
 /*
  * sysxtensa syscall handler
index b3ac40aef46b317c5a432a92ba9b40c8e1942504..89b97b5e0881853054c0807c5607c20d91d90762 100644 (file)
@@ -97,19 +97,22 @@ void blk_recalc_rq_segments(struct request *rq)
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
-       bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
-                       &q->queue_flags);
-       bool merge_not_need = bio->bi_vcnt < queue_max_segments(q);
+       unsigned short seg_cnt;
+
+       /* estimate segment number by bi_vcnt for non-cloned bio */
+       if (bio_flagged(bio, BIO_CLONED))
+               seg_cnt = bio_segments(bio);
+       else
+               seg_cnt = bio->bi_vcnt;
 
-       if (no_sg_merge && !bio_flagged(bio, BIO_CLONED) &&
-                       merge_not_need)
-               bio->bi_phys_segments = bio->bi_vcnt;
+       if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
+                       (seg_cnt < queue_max_segments(q)))
+               bio->bi_phys_segments = seg_cnt;
        else {
                struct bio *nxt = bio->bi_next;
 
                bio->bi_next = NULL;
-               bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio,
-                               no_sg_merge && merge_not_need);
+               bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
                bio->bi_next = nxt;
        }
 
index 68929bad9a6a4048151f485cc1e44db2d655fc0f..1d016fc9a8b640c54ce7e06e9f1ce1f293b694e6 100644 (file)
@@ -107,11 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
        wake_up_all(&q->mq_freeze_wq);
 }
 
-/*
- * Guarantee no request is in use, so we can change any data structure of
- * the queue afterward.
- */
-void blk_mq_freeze_queue(struct request_queue *q)
+static void blk_mq_freeze_queue_start(struct request_queue *q)
 {
        bool freeze;
 
@@ -123,9 +119,23 @@ void blk_mq_freeze_queue(struct request_queue *q)
                percpu_ref_kill(&q->mq_usage_counter);
                blk_mq_run_queues(q, false);
        }
+}
+
+static void blk_mq_freeze_queue_wait(struct request_queue *q)
+{
        wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
 }
 
+/*
+ * Guarantee no request is in use, so we can change any data structure of
+ * the queue afterward.
+ */
+void blk_mq_freeze_queue(struct request_queue *q)
+{
+       blk_mq_freeze_queue_start(q);
+       blk_mq_freeze_queue_wait(q);
+}
+
 static void blk_mq_unfreeze_queue(struct request_queue *q)
 {
        bool wake;
@@ -1921,7 +1931,7 @@ void blk_mq_free_queue(struct request_queue *q)
 /* Basically redo blk_mq_init_queue with queue frozen */
 static void blk_mq_queue_reinit(struct request_queue *q)
 {
-       blk_mq_freeze_queue(q);
+       WARN_ON_ONCE(!q->mq_freeze_depth);
 
        blk_mq_sysfs_unregister(q);
 
@@ -1936,8 +1946,6 @@ static void blk_mq_queue_reinit(struct request_queue *q)
        blk_mq_map_swqueue(q);
 
        blk_mq_sysfs_register(q);
-
-       blk_mq_unfreeze_queue(q);
 }
 
 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
@@ -1956,8 +1964,25 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
                return NOTIFY_OK;
 
        mutex_lock(&all_q_mutex);
+
+       /*
+        * We need to freeze and reinit all existing queues.  Freezing
+        * involves synchronous wait for an RCU grace period and doing it
+        * one by one may take a long time.  Start freezing all queues in
+        * one swoop and then wait for the completions so that freezing can
+        * take place in parallel.
+        */
+       list_for_each_entry(q, &all_q_list, all_q_node)
+               blk_mq_freeze_queue_start(q);
+       list_for_each_entry(q, &all_q_list, all_q_node)
+               blk_mq_freeze_queue_wait(q);
+
        list_for_each_entry(q, &all_q_list, all_q_node)
                blk_mq_queue_reinit(q);
+
+       list_for_each_entry(q, &all_q_list, all_q_node)
+               blk_mq_unfreeze_queue(q);
+
        mutex_unlock(&all_q_mutex);
        return NOTIFY_OK;
 }
index e50170ca7c33f446acc16e29a0d0097828919c30..31666c92b46af29919f42ea3e1093caed7127d71 100644 (file)
@@ -157,14 +157,16 @@ out:
 
 int ioprio_best(unsigned short aprio, unsigned short bprio)
 {
-       unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
-       unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
+       unsigned short aclass;
+       unsigned short bclass;
 
-       if (aclass == IOPRIO_CLASS_NONE)
-               aclass = IOPRIO_CLASS_BE;
-       if (bclass == IOPRIO_CLASS_NONE)
-               bclass = IOPRIO_CLASS_BE;
+       if (!ioprio_valid(aprio))
+               aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
+       if (!ioprio_valid(bprio))
+               bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
 
+       aclass = IOPRIO_PRIO_CLASS(aprio);
+       bclass = IOPRIO_PRIO_CLASS(bprio);
        if (aclass == bclass)
                return min(aprio, bprio);
        if (aclass > bclass)
index 1e053d911240b577df324912bb68af13e6174bcc..b0c2a616c8f9b859191c075526c473ea1df796bc 100644 (file)
@@ -458,7 +458,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
        rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
-               goto error;
+               goto error_free_buffer;
        }
        blk_rq_set_block_pc(rq);
 
@@ -531,9 +531,11 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
        }
        
 error:
+       blk_put_request(rq);
+
+error_free_buffer:
        kfree(buffer);
-       if (rq)
-               blk_put_request(rq);
+
        return err;
 }
 EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
index ed122e17636e32298129a8e28429ef86cb27a4fa..7556e7c4a055cd0c2dad68fa11a3f8746e6ab16e 100644 (file)
@@ -290,6 +290,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
                    DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3446"),
                },
        },
+       {
+       .callback = dmi_disable_osi_win8,
+       .ident = "Dell Vostro 3546",
+       .matches = {
+                   DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                   DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3546"),
+               },
+       },
 
        /*
         * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
index 5f039f1910677ff45d59cb4f7c0812f84f1dac23..e45f83789809a29a722448c589bcc6b5c60f0303 100644 (file)
@@ -60,6 +60,7 @@ enum board_ids {
        /* board IDs by feature in alphabetical order */
        board_ahci,
        board_ahci_ign_iferr,
+       board_ahci_nomsi,
        board_ahci_noncq,
        board_ahci_nosntf,
        board_ahci_yes_fbs,
@@ -121,6 +122,13 @@ static const struct ata_port_info ahci_port_info[] = {
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_ops,
        },
+       [board_ahci_nomsi] = {
+               AHCI_HFLAGS     (AHCI_HFLAG_NO_MSI),
+               .flags          = AHCI_FLAG_COMMON,
+               .pio_mask       = ATA_PIO4,
+               .udma_mask      = ATA_UDMA6,
+               .port_ops       = &ahci_ops,
+       },
        [board_ahci_noncq] = {
                AHCI_HFLAGS     (AHCI_HFLAG_NO_NCQ),
                .flags          = AHCI_FLAG_COMMON,
@@ -313,6 +321,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
        { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
        { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
+       { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
+       { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
+       { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
+       { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
+       { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
 
        /* JMicron 360/1/3/5/6, match class to avoid IDE function */
        { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -475,10 +488,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci },   /* ASM1062 */
 
        /*
-        * Samsung SSDs found on some macbooks.  NCQ times out.
-        * https://bugzilla.kernel.org/show_bug.cgi?id=60731
+        * Samsung SSDs found on some macbooks.  NCQ times out if MSI is
+        * enabled.  https://bugzilla.kernel.org/show_bug.cgi?id=60731
         */
-       { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_noncq },
+       { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi },
 
        /* Enmotus */
        { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
@@ -514,12 +527,9 @@ MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
 static void ahci_pci_save_initial_config(struct pci_dev *pdev,
                                         struct ahci_host_priv *hpriv)
 {
-       unsigned int force_port_map = 0;
-       unsigned int mask_port_map = 0;
-
        if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
                dev_info(&pdev->dev, "JMB361 has only one port\n");
-               force_port_map = 1;
+               hpriv->force_port_map = 1;
        }
 
        /*
@@ -529,9 +539,9 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
         */
        if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
                if (pdev->device == 0x6121)
-                       mask_port_map = 0x3;
+                       hpriv->mask_port_map = 0x3;
                else
-                       mask_port_map = 0xf;
+                       hpriv->mask_port_map = 0xf;
                dev_info(&pdev->dev,
                          "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
        }
index 5eb61c9e63da95cbd24a54ee0604262288c8a9f6..97683e45ab043be5045ae22945a5520e845cfe4f 100644 (file)
@@ -1778,16 +1778,15 @@ static void ahci_handle_port_interrupt(struct ata_port *ap,
        }
 }
 
-static void ahci_update_intr_status(struct ata_port *ap)
+static void ahci_port_intr(struct ata_port *ap)
 {
        void __iomem *port_mmio = ahci_port_base(ap);
-       struct ahci_port_priv *pp = ap->private_data;
        u32 status;
 
        status = readl(port_mmio + PORT_IRQ_STAT);
        writel(status, port_mmio + PORT_IRQ_STAT);
 
-       atomic_or(status, &pp->intr_status);
+       ahci_handle_port_interrupt(ap, port_mmio, status);
 }
 
 static irqreturn_t ahci_port_thread_fn(int irq, void *dev_instance)
@@ -1808,34 +1807,6 @@ static irqreturn_t ahci_port_thread_fn(int irq, void *dev_instance)
        return IRQ_HANDLED;
 }
 
-irqreturn_t ahci_thread_fn(int irq, void *dev_instance)
-{
-       struct ata_host *host = dev_instance;
-       struct ahci_host_priv *hpriv = host->private_data;
-       u32 irq_masked = hpriv->port_map;
-       unsigned int i;
-
-       for (i = 0; i < host->n_ports; i++) {
-               struct ata_port *ap;
-
-               if (!(irq_masked & (1 << i)))
-                       continue;
-
-               ap = host->ports[i];
-               if (ap) {
-                       ahci_port_thread_fn(irq, ap);
-                       VPRINTK("port %u\n", i);
-               } else {
-                       VPRINTK("port %u (no irq)\n", i);
-                       if (ata_ratelimit())
-                               dev_warn(host->dev,
-                                        "interrupt on disabled port %u\n", i);
-               }
-       }
-
-       return IRQ_HANDLED;
-}
-
 static irqreturn_t ahci_multi_irqs_intr(int irq, void *dev_instance)
 {
        struct ata_port *ap = dev_instance;
@@ -1875,6 +1846,8 @@ static irqreturn_t ahci_single_irq_intr(int irq, void *dev_instance)
 
        irq_masked = irq_stat & hpriv->port_map;
 
+       spin_lock(&host->lock);
+
        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap;
 
@@ -1883,7 +1856,7 @@ static irqreturn_t ahci_single_irq_intr(int irq, void *dev_instance)
 
                ap = host->ports[i];
                if (ap) {
-                       ahci_update_intr_status(ap);
+                       ahci_port_intr(ap);
                        VPRINTK("port %u\n", i);
                } else {
                        VPRINTK("port %u (no irq)\n", i);
@@ -1906,9 +1879,11 @@ static irqreturn_t ahci_single_irq_intr(int irq, void *dev_instance)
         */
        writel(irq_stat, mmio + HOST_IRQ_STAT);
 
+       spin_unlock(&host->lock);
+
        VPRINTK("EXIT\n");
 
-       return handled ? IRQ_WAKE_THREAD : IRQ_NONE;
+       return IRQ_RETVAL(handled);
 }
 
 unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
@@ -2320,8 +2295,13 @@ static int ahci_port_start(struct ata_port *ap)
         */
        pp->intr_mask = DEF_PORT_IRQ;
 
-       spin_lock_init(&pp->lock);
-       ap->lock = &pp->lock;
+       /*
+        * Switch to per-port locking in case each port has its own MSI vector.
+        */
+       if ((hpriv->flags & AHCI_HFLAG_MULTI_MSI)) {
+               spin_lock_init(&pp->lock);
+               ap->lock = &pp->lock;
+       }
 
        ap->private_data = pp;
 
@@ -2482,31 +2462,6 @@ out_free_irqs:
        return rc;
 }
 
-static int ahci_host_activate_single_irq(struct ata_host *host, int irq,
-                                        struct scsi_host_template *sht)
-{
-       int i, rc;
-
-       rc = ata_host_start(host);
-       if (rc)
-               return rc;
-
-       rc = devm_request_threaded_irq(host->dev, irq, ahci_single_irq_intr,
-                                      ahci_thread_fn, IRQF_SHARED,
-                                      dev_driver_string(host->dev), host);
-       if (rc)
-               return rc;
-
-       for (i = 0; i < host->n_ports; i++)
-               ata_port_desc(host->ports[i], "irq %d", irq);
-
-       rc = ata_host_register(host, sht);
-       if (rc)
-               devm_free_irq(host->dev, irq, host);
-
-       return rc;
-}
-
 /**
  *     ahci_host_activate - start AHCI host, request IRQs and register it
  *     @host: target ATA host
@@ -2532,7 +2487,8 @@ int ahci_host_activate(struct ata_host *host, int irq,
        if (hpriv->flags & AHCI_HFLAG_MULTI_MSI)
                rc = ahci_host_activate_multi_irqs(host, irq, sht);
        else
-               rc = ahci_host_activate_single_irq(host, irq, sht);
+               rc = ata_host_activate(host, irq, ahci_single_irq_intr,
+                                      IRQF_SHARED, sht);
        return rc;
 }
 EXPORT_SYMBOL_GPL(ahci_host_activate);
index 61eb6d77dac7f507403578a283eade305378f566..ea1fbc1d4c5f1134d05e69a8e9d322c3f54d0789 100644 (file)
 enum sata_rcar_type {
        RCAR_GEN1_SATA,
        RCAR_GEN2_SATA,
+       RCAR_R8A7790_ES1_SATA,
 };
 
 struct sata_rcar_priv {
@@ -763,6 +764,9 @@ static void sata_rcar_setup_port(struct ata_host *host)
        ap->udma_mask   = ATA_UDMA6;
        ap->flags       |= ATA_FLAG_SATA;
 
+       if (priv->type == RCAR_R8A7790_ES1_SATA)
+               ap->flags       |= ATA_FLAG_NO_DIPM;
+
        ioaddr->cmd_addr = base + SDATA_REG;
        ioaddr->ctl_addr = base + SSDEVCON_REG;
        ioaddr->scr_addr = base + SCRSSTS_REG;
@@ -792,6 +796,7 @@ static void sata_rcar_init_controller(struct ata_host *host)
                sata_rcar_gen1_phy_init(priv);
                break;
        case RCAR_GEN2_SATA:
+       case RCAR_R8A7790_ES1_SATA:
                sata_rcar_gen2_phy_init(priv);
                break;
        default:
@@ -837,10 +842,18 @@ static struct of_device_id sata_rcar_match[] = {
                .compatible = "renesas,sata-r8a7790",
                .data = (void *)RCAR_GEN2_SATA
        },
+       {
+               .compatible = "renesas,sata-r8a7790-es1",
+               .data = (void *)RCAR_R8A7790_ES1_SATA
+       },
        {
                .compatible = "renesas,sata-r8a7791",
                .data = (void *)RCAR_GEN2_SATA
        },
+       {
+               .compatible = "renesas,sata-r8a7793",
+               .data = (void *)RCAR_GEN2_SATA
+       },
        { },
 };
 MODULE_DEVICE_TABLE(of, sata_rcar_match);
@@ -849,7 +862,9 @@ static const struct platform_device_id sata_rcar_id_table[] = {
        { "sata_rcar", RCAR_GEN1_SATA }, /* Deprecated by "sata-r8a7779" */
        { "sata-r8a7779", RCAR_GEN1_SATA },
        { "sata-r8a7790", RCAR_GEN2_SATA },
+       { "sata-r8a7790-es1", RCAR_R8A7790_ES1_SATA },
        { "sata-r8a7791", RCAR_GEN2_SATA },
+       { "sata-r8a7793", RCAR_GEN2_SATA },
        { },
 };
 MODULE_DEVICE_TABLE(platform, sata_rcar_id_table);
index 40bc2f4072cc28ea4138ae36b3b08cb96f2ed158..fb83d4acd400ef0c9e87fddf92f7c7f4da174487 100644 (file)
@@ -361,9 +361,19 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
        struct device *dev = pdd->dev;
        int ret = 0;
 
-       if (gpd_data->need_restore)
+       if (gpd_data->need_restore > 0)
                return 0;
 
+       /*
+        * If the value of the need_restore flag is still unknown at this point,
+        * we trust that pm_genpd_poweroff() has verified that the device is
+        * already runtime PM suspended.
+        */
+       if (gpd_data->need_restore < 0) {
+               gpd_data->need_restore = 1;
+               return 0;
+       }
+
        mutex_unlock(&genpd->lock);
 
        genpd_start_dev(genpd, dev);
@@ -373,7 +383,7 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
        mutex_lock(&genpd->lock);
 
        if (!ret)
-               gpd_data->need_restore = true;
+               gpd_data->need_restore = 1;
 
        return ret;
 }
@@ -389,12 +399,17 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
 {
        struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
        struct device *dev = pdd->dev;
-       bool need_restore = gpd_data->need_restore;
+       int need_restore = gpd_data->need_restore;
 
-       gpd_data->need_restore = false;
+       gpd_data->need_restore = 0;
        mutex_unlock(&genpd->lock);
 
        genpd_start_dev(genpd, dev);
+
+       /*
+        * Call genpd_restore_dev() for recently added devices too (need_restore
+        * is negative then).
+        */
        if (need_restore)
                genpd_restore_dev(genpd, dev);
 
@@ -603,6 +618,7 @@ static void genpd_power_off_work_fn(struct work_struct *work)
 static int pm_genpd_runtime_suspend(struct device *dev)
 {
        struct generic_pm_domain *genpd;
+       struct generic_pm_domain_data *gpd_data;
        bool (*stop_ok)(struct device *__dev);
        int ret;
 
@@ -628,6 +644,16 @@ static int pm_genpd_runtime_suspend(struct device *dev)
                return 0;
 
        mutex_lock(&genpd->lock);
+
+       /*
+        * If we have an unknown state of the need_restore flag, it means none
+        * of the runtime PM callbacks has been invoked yet. Let's update the
+        * flag to reflect that the current state is active.
+        */
+       gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+       if (gpd_data->need_restore < 0)
+               gpd_data->need_restore = 0;
+
        genpd->in_progress++;
        pm_genpd_poweroff(genpd);
        genpd->in_progress--;
@@ -1437,12 +1463,12 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
        spin_unlock_irq(&dev->power.lock);
 
        if (genpd->attach_dev)
-               genpd->attach_dev(dev);
+               genpd->attach_dev(genpd, dev);
 
        mutex_lock(&gpd_data->lock);
        gpd_data->base.dev = dev;
        list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
-       gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
+       gpd_data->need_restore = -1;
        gpd_data->td.constraint_changed = true;
        gpd_data->td.effective_constraint_ns = -1;
        mutex_unlock(&gpd_data->lock);
@@ -1499,7 +1525,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
        genpd->max_off_time_changed = true;
 
        if (genpd->detach_dev)
-               genpd->detach_dev(dev);
+               genpd->detach_dev(genpd, dev);
 
        spin_lock_irq(&dev->power.lock);
 
@@ -1546,7 +1572,7 @@ void pm_genpd_dev_need_restore(struct device *dev, bool val)
 
        psd = dev_to_psd(dev);
        if (psd && psd->domain_data)
-               to_gpd_data(psd->domain_data)->need_restore = val;
+               to_gpd_data(psd->domain_data)->need_restore = val ? 1 : 0;
 
        spin_unlock_irqrestore(&dev->power.lock, flags);
 }
index 2ad0b5bce44be89494d9cb5f75da19b9e459cd10..3920ee45aa5942dd816a775180eeb16f662e804c 100644 (file)
@@ -560,7 +560,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
        }
 
        if (page_zero_filled(uncmem)) {
-               kunmap_atomic(user_mem);
+               if (user_mem)
+                       kunmap_atomic(user_mem);
                /* Free memory associated with this sector now. */
                bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
                zram_free_page(zram, index);
index 6226aa08c36af59b9406b5a278880537e16ac9a4..bcf86f91800a2c1810791a4e3a508d490de0d8f9 100644 (file)
 #include <asm/vio.h>
 
 
-static int pseries_rng_data_read(struct hwrng *rng, u32 *data)
+static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
 {
+       u64 buffer[PLPAR_HCALL_BUFSIZE];
+       size_t size = max < 8 ? max : 8;
        int rc;
 
-       rc = plpar_hcall(H_RANDOM, (unsigned long *)data);
+       rc = plpar_hcall(H_RANDOM, (unsigned long *)buffer);
        if (rc != H_SUCCESS) {
                pr_err_ratelimited("H_RANDOM call failed %d\n", rc);
                return -EIO;
        }
+       memcpy(data, buffer, size);
 
        /* The hypervisor interface returns 64 bits */
-       return 8;
+       return size;
 }
 
 /**
@@ -55,7 +58,7 @@ static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev)
 
 static struct hwrng pseries_rng = {
        .name           = KBUILD_MODNAME,
-       .data_read      = pseries_rng_data_read,
+       .read           = pseries_rng_read,
 };
 
 static int __init pseries_rng_probe(struct vio_dev *dev,
index bfa640023e64893251d2cb2cf54af5a853d89568..cf7a561fad7cd9f3cdd94fe16e5c22d65318428c 100644 (file)
@@ -1449,8 +1449,6 @@ static int add_port(struct ports_device *portdev, u32 id)
        spin_lock_init(&port->outvq_lock);
        init_waitqueue_head(&port->waitqueue);
 
-       virtio_device_ready(portdev->vdev);
-
        /* Fill the in_vq with buffers so the host can send us data. */
        nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
        if (!nr_added_bufs) {
@@ -2026,6 +2024,8 @@ static int virtcons_probe(struct virtio_device *vdev)
        spin_lock_init(&portdev->ports_lock);
        INIT_LIST_HEAD(&portdev->ports);
 
+       virtio_device_ready(portdev->vdev);
+
        if (multiport) {
                unsigned int nr_added_bufs;
 
index 23aaf40cf37f5b6b7e5416a0125e62d581864079..f657c571b18e4e6baaa52250640aac4e1fa26267 100644 (file)
@@ -166,8 +166,8 @@ try_again:
                if (ret == -EPROBE_DEFER)
                        dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu);
                else
-                       dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", ret,
-                               cpu);
+                       dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", cpu,
+                               ret);
        } else {
                *cdev = cpu_dev;
                *creg = cpu_reg;
index 644b54e1e7d13e8ebc863f9bb8c694278d999227..4473eba1d6b0b6084f632a8cb25e7c7cd815170b 100644 (file)
@@ -1022,7 +1022,8 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
 
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-       policy->governor = NULL;
+       if (policy)
+               policy->governor = NULL;
 
        return policy;
 }
index 871703c49d2c09923d162af7651518d46a5ff3a4..e1eaf4ff9762646acac020c85c77bb122c570f0e 100644 (file)
@@ -48,23 +48,29 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
        u32 *desc;
        struct split_key_result result;
        dma_addr_t dma_addr_in, dma_addr_out;
-       int ret = 0;
+       int ret = -ENOMEM;
 
        desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
        if (!desc) {
                dev_err(jrdev, "unable to allocate key input memory\n");
-               return -ENOMEM;
+               return ret;
        }
 
-       init_job_desc(desc, 0);
-
        dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen,
                                     DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, dma_addr_in)) {
                dev_err(jrdev, "unable to map key input memory\n");
-               kfree(desc);
-               return -ENOMEM;
+               goto out_free;
        }
+
+       dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
+                                     DMA_FROM_DEVICE);
+       if (dma_mapping_error(jrdev, dma_addr_out)) {
+               dev_err(jrdev, "unable to map key output memory\n");
+               goto out_unmap_in;
+       }
+
+       init_job_desc(desc, 0);
        append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
 
        /* Sets MDHA up into an HMAC-INIT */
@@ -81,13 +87,6 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
         * FIFO_STORE with the explicit split-key content store
         * (0x26 output type)
         */
-       dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
-                                     DMA_FROM_DEVICE);
-       if (dma_mapping_error(jrdev, dma_addr_out)) {
-               dev_err(jrdev, "unable to map key output memory\n");
-               kfree(desc);
-               return -ENOMEM;
-       }
        append_fifo_store(desc, dma_addr_out, split_key_len,
                          LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
 
@@ -115,10 +114,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
 
        dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
                         DMA_FROM_DEVICE);
+out_unmap_in:
        dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
-
+out_free:
        kfree(desc);
-
        return ret;
 }
 EXPORT_SYMBOL(gen_split_key);
index 9282381b03ced19b0e890a10e51421224276be69..fe7b3f06f6e62ac9ab1e35a9bfb6a2c3f267c35a 100644 (file)
@@ -198,8 +198,7 @@ struct adf_accel_dev {
        struct dentry *debugfs_dir;
        struct list_head list;
        struct module *owner;
-       uint8_t accel_id;
-       uint8_t numa_node;
        struct adf_accel_pci accel_pci_dev;
+       uint8_t accel_id;
 } __packed;
 #endif
index 5f3fa45348b46c6c5f22093c7e5426ac43c4ec6d..9dd2cb72a4e862e8203b9a0ffdc109ce4480c0be 100644 (file)
@@ -419,9 +419,10 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
                WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
                ring = &bank->rings[i];
                if (hw_data->tx_rings_mask & (1 << i)) {
-                       ring->inflights = kzalloc_node(sizeof(atomic_t),
-                                                      GFP_KERNEL,
-                                                      accel_dev->numa_node);
+                       ring->inflights =
+                               kzalloc_node(sizeof(atomic_t),
+                                            GFP_KERNEL,
+                                            dev_to_node(&GET_DEV(accel_dev)));
                        if (!ring->inflights)
                                goto err;
                } else {
@@ -469,13 +470,14 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev)
        int i, ret;
 
        etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
-                               accel_dev->numa_node);
+                               dev_to_node(&GET_DEV(accel_dev)));
        if (!etr_data)
                return -ENOMEM;
 
        num_banks = GET_MAX_BANKS(accel_dev);
        size = num_banks * sizeof(struct adf_etr_bank_data);
-       etr_data->banks = kzalloc_node(size, GFP_KERNEL, accel_dev->numa_node);
+       etr_data->banks = kzalloc_node(size, GFP_KERNEL,
+                                      dev_to_node(&GET_DEV(accel_dev)));
        if (!etr_data->banks) {
                ret = -ENOMEM;
                goto err_bank;
index f2e2f158cfbecec1bbef49a5b061d824c2365b3e..9e9619cd4a79b958dae8e6ceee84d0d267325322 100644 (file)
@@ -596,7 +596,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
        if (unlikely(!n))
                return -EINVAL;
 
-       bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node);
+       bufl = kmalloc_node(sz, GFP_ATOMIC,
+                           dev_to_node(&GET_DEV(inst->accel_dev)));
        if (unlikely(!bufl))
                return -ENOMEM;
 
@@ -605,6 +606,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                goto err;
 
        for_each_sg(assoc, sg, assoc_n, i) {
+               if (!sg->length)
+                       continue;
                bufl->bufers[bufs].addr = dma_map_single(dev,
                                                         sg_virt(sg),
                                                         sg->length,
@@ -640,7 +643,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                struct qat_alg_buf *bufers;
 
                buflout = kmalloc_node(sz, GFP_ATOMIC,
-                                      inst->accel_dev->numa_node);
+                                      dev_to_node(&GET_DEV(inst->accel_dev)));
                if (unlikely(!buflout))
                        goto err;
                bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
index 0d59bcb50de151c9355705ff39d18b9f600eb313..828f2a686aab26d474592829de8ac47f8f83c4fe 100644 (file)
@@ -109,12 +109,14 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
 
        list_for_each(itr, adf_devmgr_get_head()) {
                accel_dev = list_entry(itr, struct adf_accel_dev, list);
-               if (accel_dev->numa_node == node && adf_dev_started(accel_dev))
+               if ((node == dev_to_node(&GET_DEV(accel_dev)) ||
+                       dev_to_node(&GET_DEV(accel_dev)) < 0)
+                               && adf_dev_started(accel_dev))
                        break;
                accel_dev = NULL;
        }
        if (!accel_dev) {
-               pr_err("QAT: Could not find device on give node\n");
+               pr_err("QAT: Could not find device on node %d\n", node);
                accel_dev = adf_devmgr_get_first();
        }
        if (!accel_dev || !adf_dev_started(accel_dev))
@@ -164,7 +166,7 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
 
        for (i = 0; i < num_inst; i++) {
                inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
-                                   accel_dev->numa_node);
+                                   dev_to_node(&GET_DEV(accel_dev)));
                if (!inst)
                        goto err;
 
index 978d6c56639df105ffce07b9037d0556329c02c6..53c491b59f07c59ec9b4264b215078a8fe368be8 100644 (file)
@@ -108,7 +108,7 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
        uint64_t reg_val;
 
        admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
-                            accel_dev->numa_node);
+                            dev_to_node(&GET_DEV(accel_dev)));
        if (!admin)
                return -ENOMEM;
        admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
index 0d0435a41be996d239517e3325a36631802bb9d2..948f66be262b31eeeb51418696a3e482eb82736c 100644 (file)
@@ -119,21 +119,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
        kfree(accel_dev);
 }
 
-static uint8_t adf_get_dev_node_id(struct pci_dev *pdev)
-{
-       unsigned int bus_per_cpu = 0;
-       struct cpuinfo_x86 *c = &cpu_data(num_online_cpus() - 1);
-
-       if (!c->phys_proc_id)
-               return 0;
-
-       bus_per_cpu = 256 / (c->phys_proc_id + 1);
-
-       if (bus_per_cpu != 0)
-               return pdev->bus->number / bus_per_cpu;
-       return 0;
-}
-
 static int qat_dev_start(struct adf_accel_dev *accel_dev)
 {
        int cpus = num_online_cpus();
@@ -235,7 +220,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        void __iomem *pmisc_bar_addr = NULL;
        char name[ADF_DEVICE_NAME_LENGTH];
        unsigned int i, bar_nr;
-       uint8_t node;
        int ret;
 
        switch (ent->device) {
@@ -246,12 +230,19 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                return -ENODEV;
        }
 
-       node = adf_get_dev_node_id(pdev);
-       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, node);
+       if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+               /* If the accelerator is connected to a node with no memory
+                * there is no point in using the accelerator since the remote
+                * memory transaction will be very slow. */
+               dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+               return -EINVAL;
+       }
+
+       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+                                dev_to_node(&pdev->dev));
        if (!accel_dev)
                return -ENOMEM;
 
-       accel_dev->numa_node = node;
        INIT_LIST_HEAD(&accel_dev->crypto_list);
 
        /* Add accel device to accel table.
@@ -264,7 +255,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        accel_dev->owner = THIS_MODULE;
        /* Allocate and configure device configuration structure */
-       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, node);
+       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+                              dev_to_node(&pdev->dev));
        if (!hw_data) {
                ret = -ENOMEM;
                goto out_err;
index 67ec61e51185b3e402da164d0c15b4b464d53ec6..d96ee21b9b77815f8ed018a78a36d43db7fc3c95 100644 (file)
@@ -168,7 +168,7 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
        uint32_t msix_num_entries = hw_data->num_banks + 1;
 
        entries = kzalloc_node(msix_num_entries * sizeof(*entries),
-                              GFP_KERNEL, accel_dev->numa_node);
+                              GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
        if (!entries)
                return -ENOMEM;
 
index 5d997a33907e431b895111fc27039468be244614..2a3973a7c44179457f635196696bc78e756a76e1 100644 (file)
@@ -1637,8 +1637,7 @@ static int dispatch_ioctl(struct client *client,
            _IOC_SIZE(cmd) > sizeof(buffer))
                return -ENOTTY;
 
-       if (_IOC_DIR(cmd) == _IOC_READ)
-               memset(&buffer, 0, _IOC_SIZE(cmd));
+       memset(&buffer, 0, sizeof(buffer));
 
        if (_IOC_DIR(cmd) & _IOC_WRITE)
                if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
index 055d5e7fbf12a14473c7b82f314630d1e5615b9c..2318b4c7a8f82399ffbeba7b5d2704c1cd3e5ec0 100644 (file)
@@ -986,6 +986,15 @@ static int i915_pm_freeze(struct device *dev)
        return i915_drm_freeze(drm_dev);
 }
 
+static int i915_pm_freeze_late(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct drm_device *drm_dev = pci_get_drvdata(pdev);
+       struct drm_i915_private *dev_priv = drm_dev->dev_private;
+
+       return intel_suspend_complete(dev_priv);
+}
+
 static int i915_pm_thaw_early(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
@@ -1570,6 +1579,7 @@ static const struct dev_pm_ops i915_pm_ops = {
        .resume_early = i915_pm_resume_early,
        .resume = i915_pm_resume,
        .freeze = i915_pm_freeze,
+       .freeze_late = i915_pm_freeze_late,
        .thaw_early = i915_pm_thaw_early,
        .thaw = i915_pm_thaw,
        .poweroff = i915_pm_poweroff,
index b672b843fd5e5831323094824116cffe912d8a55..728938f02341e31c2946724c5ac6c2d1c3ccbd50 100644 (file)
@@ -1902,6 +1902,22 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
              GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
              GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
 
+       if (!USES_PPGTT(dev_priv->dev))
+               /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
+                * so RTL will always use the value corresponding to
+                * pat_sel = 000".
+                * So let's disable cache for GGTT to avoid screen corruptions.
+                * MOCS still can be used though.
+                * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
+                * before this patch, i.e. the same uncached + snooping access
+                * like on gen6/7 seems to be in effect.
+                * - So this just fixes blitter/render access. Again it looks
+                * like it's not just uncached access, but uncached + snooping.
+                * So we can still hold onto all our assumptions wrt cpu
+                * clflushing on LLC machines.
+                */
+               pat = GEN8_PPAT(0, GEN8_PPAT_UC);
+
        /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
         * write would work. */
        I915_WRITE(GEN8_PRIVATE_PAT, pat);
index 0e018cb49147367f1fa1eacd5af6ed082ca29680..41b3be217493b9a26315e2c69968f1c080161fe6 100644 (file)
@@ -1098,12 +1098,25 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
        struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_panel *panel = &connector->panel;
+       int min;
 
        WARN_ON(panel->backlight.max == 0);
 
+       /*
+        * XXX: If the vbt value is 255, it makes min equal to max, which leads
+        * to problems. There are such machines out there. Either our
+        * interpretation is wrong or the vbt has bogus data. Or both. Safeguard
+        * against this by letting the minimum be at most (arbitrarily chosen)
+        * 25% of the max.
+        */
+       min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64);
+       if (min != dev_priv->vbt.backlight.min_brightness) {
+               DRM_DEBUG_KMS("clamping VBT min backlight %d/255 to %d/255\n",
+                             dev_priv->vbt.backlight.min_brightness, min);
+       }
+
        /* vbt value is a coefficient in range [0..255] */
-       return scale(dev_priv->vbt.backlight.min_brightness, 0, 255,
-                    0, panel->backlight.max);
+       return scale(min, 0, 255, 0, panel->backlight.max);
 }
 
 static int bdw_setup_backlight(struct intel_connector *connector)
index 377afa504d2bd045cfdc5f2eed06599610c30920..89c01fa6dd8e3208fc428379e36264560133b818 100644 (file)
@@ -4313,8 +4313,8 @@ static int cik_cp_gfx_start(struct radeon_device *rdev)
        /* init the CE partitions.  CE only used for gfx on CIK */
        radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
        radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
-       radeon_ring_write(ring, 0xc000);
-       radeon_ring_write(ring, 0xc000);
+       radeon_ring_write(ring, 0x8000);
+       radeon_ring_write(ring, 0x8000);
 
        /* setup clear context state */
        radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
@@ -9447,6 +9447,9 @@ void dce8_bandwidth_update(struct radeon_device *rdev)
        u32 num_heads = 0, lb_size;
        int i;
 
+       if (!rdev->mode_info.mode_config_initialized)
+               return;
+
        radeon_update_display_priority(rdev);
 
        for (i = 0; i < rdev->num_crtc; i++) {
index 4e8432d07f15a84893cc6cff666992135abdb7ae..d748963af08b7ea3215cf6dcfbf9ff92bbec8b69 100644 (file)
@@ -667,17 +667,20 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        struct radeon_ib ib;
        unsigned i;
+       unsigned index;
        int r;
-       void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
        u32 tmp = 0;
+       u64 gpu_addr;
 
-       if (!ptr) {
-               DRM_ERROR("invalid vram scratch pointer\n");
-               return -EINVAL;
-       }
+       if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+               index = R600_WB_DMA_RING_TEST_OFFSET;
+       else
+               index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
+
+       gpu_addr = rdev->wb.gpu_addr + index;
 
        tmp = 0xCAFEDEAD;
-       writel(tmp, ptr);
+       rdev->wb.wb[index/4] = cpu_to_le32(tmp);
 
        r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
        if (r) {
@@ -686,8 +689,8 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
        }
 
        ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
-       ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
-       ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr);
+       ib.ptr[1] = lower_32_bits(gpu_addr);
+       ib.ptr[2] = upper_32_bits(gpu_addr);
        ib.ptr[3] = 1;
        ib.ptr[4] = 0xDEADBEEF;
        ib.length_dw = 5;
@@ -704,7 +707,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
                return r;
        }
        for (i = 0; i < rdev->usec_timeout; i++) {
-               tmp = readl(ptr);
+               tmp = le32_to_cpu(rdev->wb.wb[index/4]);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
index f37d39d2bbbcf67f1e22dff4202aba0aa4320ef0..85995b4e33387586448629c44ae31bbff103f10e 100644 (file)
@@ -2345,6 +2345,9 @@ void evergreen_bandwidth_update(struct radeon_device *rdev)
        u32 num_heads = 0, lb_size;
        int i;
 
+       if (!rdev->mode_info.mode_config_initialized)
+               return;
+
        radeon_update_display_priority(rdev);
 
        for (i = 0; i < rdev->num_crtc; i++) {
@@ -2552,6 +2555,7 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
                                        WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                                        tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
                                        WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+                                       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
                                }
                        } else {
                                tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
index 10f8be0ee1736394acaf9bac24eb516707917fd2..b53b31a7b76fd67f17614a2889c5f00a1e35441e 100644 (file)
@@ -3207,6 +3207,9 @@ void r100_bandwidth_update(struct radeon_device *rdev)
        uint32_t pixel_bytes1 = 0;
        uint32_t pixel_bytes2 = 0;
 
+       if (!rdev->mode_info.mode_config_initialized)
+               return;
+
        radeon_update_display_priority(rdev);
 
        if (rdev->mode_info.crtcs[0]->base.enabled) {
index aabc343b9a8faa10728b5dd73444048a243b82e7..cf0df45d455e91ae3a363095dd02dccbb728bab3 100644 (file)
@@ -338,17 +338,17 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        struct radeon_ib ib;
        unsigned i;
+       unsigned index;
        int r;
-       void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
        u32 tmp = 0;
+       u64 gpu_addr;
 
-       if (!ptr) {
-               DRM_ERROR("invalid vram scratch pointer\n");
-               return -EINVAL;
-       }
+       if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+               index = R600_WB_DMA_RING_TEST_OFFSET;
+       else
+               index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
 
-       tmp = 0xCAFEDEAD;
-       writel(tmp, ptr);
+       gpu_addr = rdev->wb.gpu_addr + index;
 
        r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
        if (r) {
@@ -357,8 +357,8 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
        }
 
        ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
-       ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
-       ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
+       ib.ptr[1] = lower_32_bits(gpu_addr);
+       ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
        ib.ptr[3] = 0xDEADBEEF;
        ib.length_dw = 4;
 
@@ -374,7 +374,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
                return r;
        }
        for (i = 0; i < rdev->usec_timeout; i++) {
-               tmp = readl(ptr);
+               tmp = le32_to_cpu(rdev->wb.wb[index/4]);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
index 5f6db4629aaa4c04172fe092a0375a3a2a5d354f..9acb1c3c005b6ead68e940ba5443b88d3de0be0b 100644 (file)
@@ -879,6 +879,9 @@ void rs600_bandwidth_update(struct radeon_device *rdev)
        u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
        /* FIXME: implement full support */
 
+       if (!rdev->mode_info.mode_config_initialized)
+               return;
+
        radeon_update_display_priority(rdev);
 
        if (rdev->mode_info.crtcs[0]->base.enabled)
index 3462b64369bfe6142a4c8acfaec09e0bb7d8826c..0a2d36e8110838d059b73663e5e055052741272c 100644 (file)
@@ -579,6 +579,9 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
        u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
        u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;
 
+       if (!rdev->mode_info.mode_config_initialized)
+               return;
+
        radeon_update_display_priority(rdev);
 
        if (rdev->mode_info.crtcs[0]->base.enabled)
index 8a477bf1fdb31529173234f8f30a0b4e3fb9c608..c55d653aaf5f6bcfb0b80d24ad1c42025d36e267 100644 (file)
@@ -1277,6 +1277,9 @@ void rv515_bandwidth_update(struct radeon_device *rdev)
        struct drm_display_mode *mode0 = NULL;
        struct drm_display_mode *mode1 = NULL;
 
+       if (!rdev->mode_info.mode_config_initialized)
+               return;
+
        radeon_update_display_priority(rdev);
 
        if (rdev->mode_info.crtcs[0]->base.enabled)
index eeea5b6a1775ee002f36682b7d092ab3b449d913..7d5083dc4acbad7d333766195586c3b5562a5f58 100644 (file)
@@ -2384,6 +2384,9 @@ void dce6_bandwidth_update(struct radeon_device *rdev)
        u32 num_heads = 0, lb_size;
        int i;
 
+       if (!rdev->mode_info.mode_config_initialized)
+               return;
+
        radeon_update_display_priority(rdev);
 
        for (i = 0; i < rdev->num_crtc; i++) {
index 73bd9e2e42bc3c7dfbd249c0a1d3d4e1c78a19f9..3402033fa52a7225c0a91846eba5237af8d08142 100644 (file)
@@ -1659,6 +1659,7 @@ void hid_disconnect(struct hid_device *hdev)
                hdev->hiddev_disconnect(hdev);
        if (hdev->claimed & HID_CLAIMED_HIDRAW)
                hidraw_disconnect(hdev);
+       hdev->claimed = 0;
 }
 EXPORT_SYMBOL_GPL(hid_disconnect);
 
index e23ab8b30626dae386ebc84e507c55a8e5dcea6c..7c863738e419969a9dd0115ca7321d884034ae59 100644 (file)
 #define USB_VENDOR_ID_ELAN             0x04f3
 #define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089
 #define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B    0x009b
+#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103    0x0103
 #define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F    0x016f
 
 #define USB_VENDOR_ID_ELECOM           0x056e
index 5014bb567b29cd8f2799873ffb869ac55a28c6ae..552671ee7c5d7c6344fb92bca48f4f2d601b640e 100644 (file)
@@ -72,6 +72,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL },
+       { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
index fcdbde4ec692f25c11f0a44b281a3770ede2d0c0..3057dfc7e3bc6cde853660579b01b4cab3530344 100644 (file)
@@ -234,7 +234,7 @@ static const struct pci_device_id fam15h_power_id_table[] = {
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
-       { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
+       { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
        {}
 };
 MODULE_DEVICE_TABLE(pci, fam15h_power_id_table);
index d2bf2c97ae7094c03ff836f6debf2de8ca66677c..6a30eeea94beff8ee3067de4106a8db1bec14a8a 100644 (file)
@@ -181,7 +181,7 @@ static int __init populate_attr_groups(struct platform_device *pdev)
 
        opal = of_find_node_by_path("/ibm,opal/sensors");
        if (!opal) {
-               dev_err(&pdev->dev, "Opal node 'sensors' not found\n");
+               dev_dbg(&pdev->dev, "Opal node 'sensors' not found\n");
                return -ENODEV;
        }
 
@@ -335,7 +335,9 @@ static int __init ibmpowernv_init(void)
 
        err = platform_driver_probe(&ibmpowernv_driver, ibmpowernv_probe);
        if (err) {
-               pr_err("Platfrom driver probe failed\n");
+               if (err != -ENODEV)
+                       pr_err("Platform driver probe failed (%d)\n", err);
+
                goto exit_device_del;
        }
 
index 823c877a1ec0952ce5fab7a465c79826f3442764..1991d9032c3843de2ffcd20b82f5790d22ce2684 100644 (file)
@@ -161,10 +161,17 @@ static int pwm_fan_suspend(struct device *dev)
 static int pwm_fan_resume(struct device *dev)
 {
        struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
+       unsigned long duty;
+       int ret;
 
-       if (ctx->pwm_value)
-               return pwm_enable(ctx->pwm);
-       return 0;
+       if (ctx->pwm_value == 0)
+               return 0;
+
+       duty = DIV_ROUND_UP(ctx->pwm_value * (ctx->pwm->period - 1), MAX_PWM);
+       ret = pwm_config(ctx->pwm, duty, ctx->pwm->period);
+       if (ret)
+               return ret;
+       return pwm_enable(ctx->pwm);
 }
 #endif
 
index 825ca1f87639aae4bf1c0f60567bc163c75b338b..afe79719ea329e72e0d8e8433222f75fe4a9ac0a 100644 (file)
@@ -1434,9 +1434,9 @@ static void drop_buffers(struct dm_bufio_client *c)
 
 /*
  * Test if the buffer is unused and too old, and commit it.
- * At if noio is set, we must not do any I/O because we hold
- * dm_bufio_clients_lock and we would risk deadlock if the I/O gets rerouted to
- * different bufio client.
+ * And if GFP_NOFS is used, we must not do any I/O because we hold
+ * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
+ * rerouted to different bufio client.
  */
 static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
                                unsigned long max_jiffies)
@@ -1444,7 +1444,7 @@ static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
        if (jiffies - b->last_accessed < max_jiffies)
                return 0;
 
-       if (!(gfp & __GFP_IO)) {
+       if (!(gfp & __GFP_FS)) {
                if (test_bit(B_READING, &b->state) ||
                    test_bit(B_WRITING, &b->state) ||
                    test_bit(B_DIRTY, &b->state))
@@ -1486,7 +1486,7 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
        unsigned long freed;
 
        c = container_of(shrink, struct dm_bufio_client, shrinker);
-       if (sc->gfp_mask & __GFP_IO)
+       if (sc->gfp_mask & __GFP_FS)
                dm_bufio_lock(c);
        else if (!dm_bufio_trylock(c))
                return SHRINK_STOP;
@@ -1503,7 +1503,7 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
        unsigned long count;
 
        c = container_of(shrink, struct dm_bufio_client, shrinker);
-       if (sc->gfp_mask & __GFP_IO)
+       if (sc->gfp_mask & __GFP_FS)
                dm_bufio_lock(c);
        else if (!dm_bufio_trylock(c))
                return 0;
index 4857fa4a5484ba8e4ae743f2e8d4a48205229875..07c0fa0fa284fbdc9e86673c219f99cd07a35f77 100644 (file)
@@ -789,8 +789,7 @@ struct dm_raid_superblock {
        __le32 layout;
        __le32 stripe_sectors;
 
-       __u8 pad[452];          /* Round struct to 512 bytes. */
-                               /* Always set to 0 when writing. */
+       /* Remainder of a logical block is zero-filled when writing (see super_sync()). */
 } __packed;
 
 static int read_disk_sb(struct md_rdev *rdev, int size)
@@ -827,7 +826,7 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
                    test_bit(Faulty, &(rs->dev[i].rdev.flags)))
                        failed_devices |= (1ULL << i);
 
-       memset(sb, 0, sizeof(*sb));
+       memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
 
        sb->magic = cpu_to_le32(DM_RAID_MAGIC);
        sb->features = cpu_to_le32(0);  /* No features yet */
@@ -862,7 +861,11 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
        uint64_t events_sb, events_refsb;
 
        rdev->sb_start = 0;
-       rdev->sb_size = sizeof(*sb);
+       rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
+       if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
+               DMERR("superblock size of a logical block is no longer valid");
+               return -EINVAL;
+       }
 
        ret = read_disk_sb(rdev, rdev->sb_size);
        if (ret)
@@ -1169,8 +1172,12 @@ static void configure_discard_support(struct dm_target *ti, struct raid_set *rs)
        raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);
 
        for (i = 0; i < rs->md.raid_disks; i++) {
-               struct request_queue *q = bdev_get_queue(rs->dev[i].rdev.bdev);
+               struct request_queue *q;
+
+               if (!rs->dev[i].rdev.bdev)
+                       continue;
 
+               q = bdev_get_queue(rs->dev[i].rdev.bdev);
                if (!q || !blk_queue_discard(q))
                        return;
 
index d1600d2aa2e2e6983643ef0ef864195f858d4f9d..f8b37d4c05d8c301658a42c116110d1b63fc9323 100644 (file)
@@ -159,8 +159,10 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                sc->stripes_shift = __ffs(stripes);
 
        r = dm_set_target_max_io_len(ti, chunk_size);
-       if (r)
+       if (r) {
+               kfree(sc);
                return r;
+       }
 
        ti->num_flush_bios = stripes;
        ti->num_discard_bios = stripes;
index 4843801173fe11a99519b59dd808e46e02425ee4..0f86d802b533301bf8374eb07cd9224dd06c799c 100644 (file)
@@ -1936,6 +1936,14 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
                return DM_MAPIO_SUBMITTED;
        }
 
+       /*
+        * We must hold the virtual cell before doing the lookup, otherwise
+        * there's a race with discard.
+        */
+       build_virtual_key(tc->td, block, &key);
+       if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
+               return DM_MAPIO_SUBMITTED;
+
        r = dm_thin_find_block(td, block, 0, &result);
 
        /*
@@ -1959,13 +1967,10 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
                         * shared flag will be set in their case.
                         */
                        thin_defer_bio(tc, bio);
+                       cell_defer_no_holder_no_free(tc, &cell1);
                        return DM_MAPIO_SUBMITTED;
                }
 
-               build_virtual_key(tc->td, block, &key);
-               if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
-                       return DM_MAPIO_SUBMITTED;
-
                build_data_key(tc->td, result.block, &key);
                if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) {
                        cell_defer_no_holder_no_free(tc, &cell1);
@@ -1986,6 +1991,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
                         * of doing so.
                         */
                        handle_unserviceable_bio(tc->pool, bio);
+                       cell_defer_no_holder_no_free(tc, &cell1);
                        return DM_MAPIO_SUBMITTED;
                }
                /* fall through */
@@ -1996,6 +2002,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
                 * provide the hint to load the metadata into cache.
                 */
                thin_defer_bio(tc, bio);
+               cell_defer_no_holder_no_free(tc, &cell1);
                return DM_MAPIO_SUBMITTED;
 
        default:
@@ -2005,6 +2012,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
                 * pool is switched to fail-io mode.
                 */
                bio_io_error(bio);
+               cell_defer_no_holder_no_free(tc, &cell1);
                return DM_MAPIO_SUBMITTED;
        }
 }
index 37d367bb9aa8976653d5bbc80b29d0dfbf037372..bf2b80d5c4707a64210b5e57deb785069dc7d921 100644 (file)
@@ -42,6 +42,12 @@ struct btree_node {
 } __packed;
 
 
+/*
+ * Locks a block using the btree node validator.
+ */
+int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
+                struct dm_block **result);
+
 void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
                  struct dm_btree_value_type *vt);
 
index cf9fd676ae444ad29f8fb6f6ef58d85706438092..1b5e13ec7f96a670ed7a9b5b472a5d2ee95a7dff 100644 (file)
@@ -92,7 +92,7 @@ struct dm_block_validator btree_node_validator = {
 
 /*----------------------------------------------------------------*/
 
-static int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
+int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
                 struct dm_block **result)
 {
        return dm_tm_read_lock(info->tm, b, &btree_node_validator, result);
index 416060c2570981d5035376fc02b100f3b6430e5b..200ac12a1d407b5c648c7271a995a022ac8e7efc 100644 (file)
@@ -847,22 +847,26 @@ EXPORT_SYMBOL_GPL(dm_btree_find_lowest_key);
  * FIXME: We shouldn't use a recursive algorithm when we have limited stack
  * space.  Also this only works for single level trees.
  */
-static int walk_node(struct ro_spine *s, dm_block_t block,
+static int walk_node(struct dm_btree_info *info, dm_block_t block,
                     int (*fn)(void *context, uint64_t *keys, void *leaf),
                     void *context)
 {
        int r;
        unsigned i, nr;
+       struct dm_block *node;
        struct btree_node *n;
        uint64_t keys;
 
-       r = ro_step(s, block);
-       n = ro_node(s);
+       r = bn_read_lock(info, block, &node);
+       if (r)
+               return r;
+
+       n = dm_block_data(node);
 
        nr = le32_to_cpu(n->header.nr_entries);
        for (i = 0; i < nr; i++) {
                if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) {
-                       r = walk_node(s, value64(n, i), fn, context);
+                       r = walk_node(info, value64(n, i), fn, context);
                        if (r)
                                goto out;
                } else {
@@ -874,7 +878,7 @@ static int walk_node(struct ro_spine *s, dm_block_t block,
        }
 
 out:
-       ro_pop(s);
+       dm_tm_unlock(info->tm, node);
        return r;
 }
 
@@ -882,15 +886,7 @@ int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
                  int (*fn)(void *context, uint64_t *keys, void *leaf),
                  void *context)
 {
-       int r;
-       struct ro_spine spine;
-
        BUG_ON(info->levels > 1);
-
-       init_ro_spine(&spine, info);
-       r = walk_node(&spine, root, fn, context);
-       exit_ro_spine(&spine);
-
-       return r;
+       return walk_node(info, root, fn, context);
 }
 EXPORT_SYMBOL_GPL(dm_btree_walk);
index cf008f45968c77fad5cf64677adf1adc77c77b46..711773e8e64bdd9ee5835c8ae9d738aecf31eeba 100644 (file)
@@ -240,7 +240,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
                goto err_irq_charger;
        }
 
-       ret = regmap_add_irq_chip(max77693->regmap, max77693->irq,
+       ret = regmap_add_irq_chip(max77693->regmap_muic, max77693->irq,
                                IRQF_ONESHOT | IRQF_SHARED |
                                IRQF_TRIGGER_FALLING, 0,
                                &max77693_muic_irq_chip,
@@ -250,6 +250,17 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
                goto err_irq_muic;
        }
 
+       /* Unmask interrupts from all blocks in interrupt source register */
+       ret = regmap_update_bits(max77693->regmap,
+                               MAX77693_PMIC_REG_INTSRC_MASK,
+                               SRC_IRQ_ALL, (unsigned int)~SRC_IRQ_ALL);
+       if (ret < 0) {
+               dev_err(max77693->dev,
+                       "Could not unmask interrupts in INTSRC: %d\n",
+                       ret);
+               goto err_intsrc;
+       }
+
        pm_runtime_set_active(max77693->dev);
 
        ret = mfd_add_devices(max77693->dev, -1, max77693_devs,
@@ -261,6 +272,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
 
 err_mfd:
        mfd_remove_devices(max77693->dev);
+err_intsrc:
        regmap_del_irq_chip(max77693->irq, max77693->irq_data_muic);
 err_irq_muic:
        regmap_del_irq_chip(max77693->irq, max77693->irq_data_charger);
index f2643c221d345ebca0b7a9ff3266443ac1f0801b..30f7ca89a0e68619319f69dcebb12037f6e4af92 100644 (file)
@@ -947,6 +947,7 @@ static void rtsx_pci_idle_work(struct work_struct *work)
        mutex_unlock(&pcr->pcr_mutex);
 }
 
+#ifdef CONFIG_PM
 static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
 {
        if (pcr->ops->turn_off_led)
@@ -961,6 +962,7 @@ static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
        if (pcr->ops->force_power_down)
                pcr->ops->force_power_down(pcr, pm_state);
 }
+#endif
 
 static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
 {
index 2d045f26f193eb0e96ecf32ee1cc1f07b39ca99c..bee0abf82040001664c07e82c646e0e3e5afc259 100644 (file)
@@ -269,7 +269,7 @@ int stmpe_remove(struct stmpe *stmpe);
 #define STMPE24XX_REG_CHIP_ID          0x80
 #define STMPE24XX_REG_IEGPIOR_LSB      0x18
 #define STMPE24XX_REG_ISGPIOR_MSB      0x19
-#define STMPE24XX_REG_GPMR_LSB         0xA5
+#define STMPE24XX_REG_GPMR_LSB         0xA4
 #define STMPE24XX_REG_GPSR_LSB         0x85
 #define STMPE24XX_REG_GPCR_LSB         0x88
 #define STMPE24XX_REG_GPDR_LSB         0x8B
index cf92a6d1c532ac578d6bd9d042f4d1e8ebe55e7c..50f9091bcd383e86be1149813f92c0c1186a4df2 100644 (file)
@@ -44,6 +44,15 @@ static u8 twl4030_start_script_address = 0x2b;
 #define PWR_DEVSLP             BIT(1)
 #define PWR_DEVOFF             BIT(0)
 
+/* Register bits for CFG_P1_TRANSITION (also for P2 and P3) */
+#define STARTON_SWBUG          BIT(7)  /* Start on watchdog */
+#define STARTON_VBUS           BIT(5)  /* Start on VBUS */
+#define STARTON_VBAT           BIT(4)  /* Start on battery insert */
+#define STARTON_RTC            BIT(3)  /* Start on RTC */
+#define STARTON_USB            BIT(2)  /* Start on USB host */
+#define STARTON_CHG            BIT(1)  /* Start on charger */
+#define STARTON_PWON           BIT(0)  /* Start on PWRON button */
+
 #define SEQ_OFFSYNC            (1 << 0)
 
 #define PHY_TO_OFF_PM_MASTER(p)                (p - 0x36)
@@ -606,6 +615,44 @@ twl4030_power_configure_resources(const struct twl4030_power_data *pdata)
        return 0;
 }
 
+static int twl4030_starton_mask_and_set(u8 bitmask, u8 bitvalues)
+{
+       u8 regs[3] = { TWL4030_PM_MASTER_CFG_P1_TRANSITION,
+                      TWL4030_PM_MASTER_CFG_P2_TRANSITION,
+                      TWL4030_PM_MASTER_CFG_P3_TRANSITION, };
+       u8 val;
+       int i, err;
+
+       err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG1,
+                              TWL4030_PM_MASTER_PROTECT_KEY);
+       if (err)
+               goto relock;
+       err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
+                              TWL4030_PM_MASTER_KEY_CFG2,
+                              TWL4030_PM_MASTER_PROTECT_KEY);
+       if (err)
+               goto relock;
+
+       for (i = 0; i < sizeof(regs); i++) {
+               err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER,
+                                     &val, regs[i]);
+               if (err)
+                       break;
+               val = (~bitmask & val) | (bitmask & bitvalues);
+               err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
+                                      val, regs[i]);
+               if (err)
+                       break;
+       }
+
+       if (err)
+               pr_err("TWL4030 Register access failed: %i\n", err);
+
+relock:
+       return twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 0,
+                               TWL4030_PM_MASTER_PROTECT_KEY);
+}
+
 /*
  * In master mode, start the power off sequence.
  * After a successful execution, TWL shuts down the power to the SoC
@@ -615,6 +662,11 @@ void twl4030_power_off(void)
 {
        int err;
 
+       /* Disable start on charger or VBUS as it can break poweroff */
+       err = twl4030_starton_mask_and_set(STARTON_VBUS | STARTON_CHG, 0);
+       if (err)
+               pr_err("TWL4030 Unable to configure start-up\n");
+
        err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, PWR_DEVOFF,
                               TWL4030_PM_MASTER_P1_SW_EVENTS);
        if (err)
index e00f5340ed872089d31998ff39d6bb57842e731a..3c2b8f9e3c84b858df3bce9677dee22e09a8b427 100644 (file)
@@ -93,8 +93,9 @@ static int vprbrd_probe(struct usb_interface *interface,
                 version >> 8, version & 0xff,
                 vb->usb_dev->bus->busnum, vb->usb_dev->devnum);
 
-       ret = mfd_add_devices(&interface->dev, -1, vprbrd_devs,
-                               ARRAY_SIZE(vprbrd_devs), NULL, 0, NULL);
+       ret = mfd_add_devices(&interface->dev, PLATFORM_DEVID_AUTO,
+                               vprbrd_devs, ARRAY_SIZE(vprbrd_devs), NULL, 0,
+                               NULL);
        if (ret != 0) {
                dev_err(&interface->dev, "Failed to add mfd devices to core.");
                goto error;
index 63ea1941e973b6338aa7d4431ac2b9ef2d2e418b..7ba83ffb08ac73b6437f1fd4a87c3a560b9d4f84 100644 (file)
@@ -575,10 +575,24 @@ static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
        xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
 }
 
-static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
+bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
+{
+       if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
+               return false;
+
+       if (ioread32(p->ring_csr_addr + SRST_ADDR))
+               return false;
+
+       return true;
+}
+
+static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
 {
        u32 val;
 
+       if (!xgene_ring_mgr_init(pdata))
+               return -ENODEV;
+
        clk_prepare_enable(pdata->clk);
        clk_disable_unprepare(pdata->clk);
        clk_prepare_enable(pdata->clk);
@@ -590,6 +604,8 @@ static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
        val |= SCAN_AUTO_INCR;
        MGMT_CLOCK_SEL_SET(&val, 1);
        xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val);
+
+       return 0;
 }
 
 static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
index 38558584080ed8029a3640f171353fe09cb6503e..ec45f3256f0e3da2928c8be98ba9abcc0d58fb27 100644 (file)
@@ -104,6 +104,9 @@ enum xgene_enet_rm {
 #define BLOCK_ETH_MAC_OFFSET           0x0000
 #define BLOCK_ETH_MAC_CSR_OFFSET       0x2800
 
+#define CLKEN_ADDR                     0xc208
+#define SRST_ADDR                      0xc200
+
 #define MAC_ADDR_REG_OFFSET            0x00
 #define MAC_COMMAND_REG_OFFSET         0x04
 #define MAC_WRITE_REG_OFFSET           0x08
@@ -318,6 +321,7 @@ void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
 
 int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata);
 void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata);
+bool xgene_ring_mgr_init(struct xgene_enet_pdata *p);
 
 extern struct xgene_mac_ops xgene_gmac_ops;
 extern struct xgene_port_ops xgene_gport_ops;
index 3c208cc6f6bb470ae97d74f06fbaf0dcbaaf7ee6..123669696184fde824328fb61c9b2649f1c750c9 100644 (file)
@@ -639,9 +639,9 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
        struct device *dev = ndev_to_dev(ndev);
        struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
        struct xgene_enet_desc_ring *buf_pool = NULL;
-       u8 cpu_bufnum = 0, eth_bufnum = 0;
-       u8 bp_bufnum = 0x20;
-       u16 ring_id, ring_num = 0;
+       u8 cpu_bufnum = 0, eth_bufnum = START_ETH_BUFNUM;
+       u8 bp_bufnum = START_BP_BUFNUM;
+       u16 ring_id, ring_num = START_RING_NUM;
        int ret;
 
        /* allocate rx descriptor ring */
@@ -852,7 +852,9 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
        u16 dst_ring_num;
        int ret;
 
-       pdata->port_ops->reset(pdata);
+       ret = pdata->port_ops->reset(pdata);
+       if (ret)
+               return ret;
 
        ret = xgene_enet_create_desc_rings(ndev);
        if (ret) {
@@ -954,6 +956,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
 
        return ret;
 err:
+       unregister_netdev(ndev);
        free_netdev(ndev);
        return ret;
 }
index 874e5a01161fb9e9e1a05eb90afee3b148c07182..f9958fae6ffdc9fcba7cbb8f8438d95024b40ac9 100644 (file)
@@ -38,6 +38,9 @@
 #define SKB_BUFFER_SIZE                (XGENE_ENET_MAX_MTU - NET_IP_ALIGN)
 #define NUM_PKT_BUF    64
 #define NUM_BUFPOOL    32
+#define START_ETH_BUFNUM       2
+#define START_BP_BUFNUM                0x22
+#define START_RING_NUM         8
 
 #define PHY_POLL_LINK_ON       (10 * HZ)
 #define PHY_POLL_LINK_OFF      (PHY_POLL_LINK_ON / 5)
@@ -83,7 +86,7 @@ struct xgene_mac_ops {
 };
 
 struct xgene_port_ops {
-       void (*reset)(struct xgene_enet_pdata *pdata);
+       int (*reset)(struct xgene_enet_pdata *pdata);
        void (*cle_bypass)(struct xgene_enet_pdata *pdata,
                           u32 dst_ring_num, u16 bufpool_id);
        void (*shutdown)(struct xgene_enet_pdata *pdata);
index c22f32622fa9a50ad3789d8000991ede58502ca0..f5d4f68c288c395076205ba128788797d7114cdf 100644 (file)
@@ -311,14 +311,19 @@ static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p)
        xgene_sgmac_rxtx(p, TX_EN, false);
 }
 
-static void xgene_enet_reset(struct xgene_enet_pdata *p)
+static int xgene_enet_reset(struct xgene_enet_pdata *p)
 {
+       if (!xgene_ring_mgr_init(p))
+               return -ENODEV;
+
        clk_prepare_enable(p->clk);
        clk_disable_unprepare(p->clk);
        clk_prepare_enable(p->clk);
 
        xgene_enet_ecc_init(p);
        xgene_enet_config_ring_if_assoc(p);
+
+       return 0;
 }
 
 static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
index 67d07206b3c7723e2287a41fb964860cc4b78d04..a18a9d1f11432d4469d2d711ec633f22565017eb 100644 (file)
@@ -252,14 +252,19 @@ static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata)
        xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN);
 }
 
-static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
+static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
 {
+       if (!xgene_ring_mgr_init(pdata))
+               return -ENODEV;
+
        clk_prepare_enable(pdata->clk);
        clk_disable_unprepare(pdata->clk);
        clk_prepare_enable(pdata->clk);
 
        xgene_enet_ecc_init(pdata);
        xgene_enet_config_ring_if_assoc(pdata);
+
+       return 0;
 }
 
 static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
index 3a6778a667f4558f52f1327f413e088882490f7d..531bb7c57531b17fc64b0719690accba8afb6b1e 100644 (file)
@@ -1110,7 +1110,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
        /* We just need one DMA descriptor which is DMA-able, since writing to
         * the port will allocate a new descriptor in its internal linked-list
         */
-       p = dma_zalloc_coherent(kdev, 1, &ring->desc_dma, GFP_KERNEL);
+       p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
+                               GFP_KERNEL);
        if (!p) {
                netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
                return -ENOMEM;
@@ -1174,6 +1175,13 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
        if (!(reg & TDMA_DISABLED))
                netdev_warn(priv->netdev, "TDMA not stopped!\n");
 
+       /* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
+        * fail, so by checking this pointer we know whether the TX ring was
+        * fully initialized or not.
+        */
+       if (!ring->cbs)
+               return;
+
        napi_disable(&ring->napi);
        netif_napi_del(&ring->napi);
 
@@ -1183,7 +1191,8 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
        ring->cbs = NULL;
 
        if (ring->desc_dma) {
-               dma_free_coherent(kdev, 1, ring->desc_cpu, ring->desc_dma);
+               dma_free_coherent(kdev, sizeof(struct dma_desc),
+                                 ring->desc_cpu, ring->desc_dma);
                ring->desc_dma = 0;
        }
        ring->size = 0;
index fdc9ec09e453510636460780226e0349cc51b9a5..da1a2500c91ce43677e7cada4b66196c17af2513 100644 (file)
@@ -2140,6 +2140,12 @@ static int bcmgenet_open(struct net_device *dev)
                goto err_irq0;
        }
 
+       /* Re-configure the port multiplexer towards the PHY device */
+       bcmgenet_mii_config(priv->dev, false);
+
+       phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup,
+                          priv->phy_interface);
+
        bcmgenet_netif_start(dev);
 
        return 0;
@@ -2184,6 +2190,9 @@ static int bcmgenet_close(struct net_device *dev)
 
        bcmgenet_netif_stop(dev);
 
+       /* Really kill the PHY state machine and disconnect from it */
+       phy_disconnect(priv->phydev);
+
        /* Disable MAC receive */
        umac_enable_set(priv, CMD_RX_EN, false);
 
@@ -2685,7 +2694,7 @@ static int bcmgenet_resume(struct device *d)
 
        phy_init_hw(priv->phydev);
        /* Speed settings must be restored */
-       bcmgenet_mii_config(priv->dev);
+       bcmgenet_mii_config(priv->dev, false);
 
        /* disable ethernet MAC while updating its registers */
        umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
index dbf524ea3b19561bc1098b3cd58620cf9d66ac43..31b2da5f9b821342315ddebd958a765e7560cd51 100644 (file)
@@ -617,9 +617,10 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
 
 /* MDIO routines */
 int bcmgenet_mii_init(struct net_device *dev);
-int bcmgenet_mii_config(struct net_device *dev);
+int bcmgenet_mii_config(struct net_device *dev, bool init);
 void bcmgenet_mii_exit(struct net_device *dev);
 void bcmgenet_mii_reset(struct net_device *dev);
+void bcmgenet_mii_setup(struct net_device *dev);
 
 /* Wake-on-LAN routines */
 void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
index 9ff799a9f8019dafe2d47be33aba738921e588a3..933cd7e7cd33708bf89292a7bb50035165d0e867 100644 (file)
@@ -77,7 +77,7 @@ static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
 /* setup netdev link state when PHY link status change and
  * update UMAC and RGMII block when link up
  */
-static void bcmgenet_mii_setup(struct net_device *dev)
+void bcmgenet_mii_setup(struct net_device *dev)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct phy_device *phydev = priv->phydev;
@@ -211,7 +211,7 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
        bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
 }
 
-int bcmgenet_mii_config(struct net_device *dev)
+int bcmgenet_mii_config(struct net_device *dev, bool init)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct phy_device *phydev = priv->phydev;
@@ -298,7 +298,8 @@ int bcmgenet_mii_config(struct net_device *dev)
                return -EINVAL;
        }
 
-       dev_info(kdev, "configuring instance for %s\n", phy_name);
+       if (init)
+               dev_info(kdev, "configuring instance for %s\n", phy_name);
 
        return 0;
 }
@@ -350,7 +351,7 @@ static int bcmgenet_mii_probe(struct net_device *dev)
         * PHY speed which is needed for bcmgenet_mii_config() to configure
         * things appropriately.
         */
-       ret = bcmgenet_mii_config(dev);
+       ret = bcmgenet_mii_config(dev, true);
        if (ret) {
                phy_disconnect(priv->phydev);
                return ret;
index 6fe300e316c3c41e398b06f1864cfdc7f1f0ebd6..cca6049940037331911ba26bb5fe4c35b48e5059 100644 (file)
@@ -79,8 +79,9 @@ static void cxgb4_dcb_cleanup_apps(struct net_device *dev)
                app.protocol = dcb->app_priority[i].protocolid;
 
                if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
+                       app.priority = dcb->app_priority[i].user_prio_map;
                        app.selector = dcb->app_priority[i].sel_field + 1;
-                       err = dcb_ieee_setapp(dev, &app);
+                       err = dcb_ieee_delapp(dev, &app);
                } else {
                        app.selector = !!(dcb->app_priority[i].sel_field);
                        err = dcb_setapp(dev, &app);
@@ -122,7 +123,11 @@ void cxgb4_dcb_state_fsm(struct net_device *dev,
                case CXGB4_DCB_INPUT_FW_ENABLED: {
                        /* we're going to use Firmware DCB */
                        dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
-                       dcb->supported = CXGB4_DCBX_FW_SUPPORT;
+                       dcb->supported = DCB_CAP_DCBX_LLD_MANAGED;
+                       if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE)
+                               dcb->supported |= DCB_CAP_DCBX_VER_IEEE;
+                       else
+                               dcb->supported |= DCB_CAP_DCBX_VER_CEE;
                        break;
                }
 
@@ -436,14 +441,17 @@ static void cxgb4_getpgtccfg(struct net_device *dev, int tc,
        *up_tc_map = (1 << tc);
 
        /* prio_type is link strict */
-       *prio_type = 0x2;
+       if (*pgid != 0xF)
+               *prio_type = 0x2;
 }
 
 static void cxgb4_getpgtccfg_tx(struct net_device *dev, int tc,
                                u8 *prio_type, u8 *pgid, u8 *bw_per,
                                u8 *up_tc_map)
 {
-       return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 1);
+       /* tc 0 is written at MSB position */
+       return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
+                               up_tc_map, 1);
 }
 
 
@@ -451,7 +459,9 @@ static void cxgb4_getpgtccfg_rx(struct net_device *dev, int tc,
                                u8 *prio_type, u8 *pgid, u8 *bw_per,
                                u8 *up_tc_map)
 {
-       return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 0);
+       /* tc 0 is written at MSB position */
+       return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
+                               up_tc_map, 0);
 }
 
 static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
@@ -461,6 +471,7 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
        struct fw_port_cmd pcmd;
        struct port_info *pi = netdev2pinfo(dev);
        struct adapter *adap = pi->adapter;
+       int fw_tc = 7 - tc;
        u32 _pgid;
        int err;
 
@@ -479,8 +490,8 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
        }
 
        _pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
-       _pgid &= ~(0xF << (tc * 4));
-       _pgid |= pgid << (tc * 4);
+       _pgid &= ~(0xF << (fw_tc * 4));
+       _pgid |= pgid << (fw_tc * 4);
        pcmd.u.dcb.pgid.pgid = cpu_to_be32(_pgid);
 
        INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
@@ -593,7 +604,7 @@ static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg)
            priority >= CXGB4_MAX_PRIORITY)
                *pfccfg = 0;
        else
-               *pfccfg = (pi->dcb.pfcen >> priority) & 1;
+               *pfccfg = (pi->dcb.pfcen >> (7 - priority)) & 1;
 }
 
 /* Enable/disable Priority Pause Frames for the specified Traffic Class
@@ -618,9 +629,9 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg)
        pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen;
 
        if (pfccfg)
-               pcmd.u.dcb.pfc.pfcen |= (1 << priority);
+               pcmd.u.dcb.pfc.pfcen |= (1 << (7 - priority));
        else
-               pcmd.u.dcb.pfc.pfcen &= (~(1 << priority));
+               pcmd.u.dcb.pfc.pfcen &= (~(1 << (7 - priority)));
 
        err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
        if (err != FW_PORT_DCB_CFG_SUCCESS) {
index 5e1b314e11af674f8d0f7f1ebf3bed60835dda08..39f2b13e66c731add3b842ef8a8db1fcde906c4b 100644 (file)
@@ -2914,7 +2914,8 @@ static int t4_sge_init_hard(struct adapter *adap)
 int t4_sge_init(struct adapter *adap)
 {
        struct sge *s = &adap->sge;
-       u32 sge_control, sge_conm_ctrl;
+       u32 sge_control, sge_control2, sge_conm_ctrl;
+       unsigned int ingpadboundary, ingpackboundary;
        int ret, egress_threshold;
 
        /*
@@ -2924,8 +2925,31 @@ int t4_sge_init(struct adapter *adap)
        sge_control = t4_read_reg(adap, SGE_CONTROL);
        s->pktshift = PKTSHIFT_GET(sge_control);
        s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
-       s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) +
-                           X_INGPADBOUNDARY_SHIFT);
+
+       /* T4 uses a single control field to specify both the PCIe Padding and
+        * Packing Boundary.  T5 introduced the ability to specify these
+        * separately.  The actual Ingress Packet Data alignment boundary
+        * within Packed Buffer Mode is the maximum of these two
+        * specifications.
+        */
+       ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_control) +
+                              X_INGPADBOUNDARY_SHIFT);
+       if (is_t4(adap->params.chip)) {
+               s->fl_align = ingpadboundary;
+       } else {
+               /* T5 has a different interpretation of one of the PCIe Packing
+                * Boundary values.
+                */
+               sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
+               ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
+               if (ingpackboundary == INGPACKBOUNDARY_16B_X)
+                       ingpackboundary = 16;
+               else
+                       ingpackboundary = 1 << (ingpackboundary +
+                                               INGPACKBOUNDARY_SHIFT_X);
+
+               s->fl_align = max(ingpadboundary, ingpackboundary);
+       }
 
        if (adap->flags & USING_SOFT_PARAMS)
                ret = t4_sge_init_soft(adap);
index a9d9d74e4f092f969c2d58eaf01455b7bca7e257..163a2a14948cf8e3f5d78cf63588ee3905baa315 100644 (file)
@@ -3129,12 +3129,51 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
                     HOSTPAGESIZEPF6(sge_hps) |
                     HOSTPAGESIZEPF7(sge_hps));
 
-       t4_set_reg_field(adap, SGE_CONTROL,
-                        INGPADBOUNDARY_MASK |
-                        EGRSTATUSPAGESIZE_MASK,
-                        INGPADBOUNDARY(fl_align_log - 5) |
-                        EGRSTATUSPAGESIZE(stat_len != 64));
-
+       if (is_t4(adap->params.chip)) {
+               t4_set_reg_field(adap, SGE_CONTROL,
+                                INGPADBOUNDARY_MASK |
+                                EGRSTATUSPAGESIZE_MASK,
+                                INGPADBOUNDARY(fl_align_log - 5) |
+                                EGRSTATUSPAGESIZE(stat_len != 64));
+       } else {
+               /* T5 introduced the separation of the Free List Padding and
+                * Packing Boundaries.  Thus, we can select a smaller Padding
+                * Boundary to avoid uselessly chewing up PCIe Link and Memory
+                * Bandwidth, and use a Packing Boundary which is large enough
+                * to avoid false sharing between CPUs, etc.
+                *
+                * For the PCI Link, the smaller the Padding Boundary the
+                * better.  For the Memory Controller, a smaller Padding
+                * Boundary is better until we cross under the Memory Line
+                * Size (the minimum unit of transfer to/from Memory).  If we
+                * have a Padding Boundary which is smaller than the Memory
+                * Line Size, that'll involve a Read-Modify-Write cycle on the
+                * Memory Controller which is never good.  For T5 the smallest
+                * Padding Boundary which we can select is 32 bytes which is
+                * larger than any known Memory Controller Line Size so we'll
+                * use that.
+                *
+                * T5 has a different interpretation of the "0" value for the
+                * Packing Boundary.  This corresponds to 16 bytes instead of
+                * the expected 32 bytes.  We never have a Packing Boundary
+                * less than 32 bytes so we can't use that special value but
+                * on the other hand, if we wanted 32 bytes, the best we can
+                * really do is 64 bytes.
+                */
+               if (fl_align <= 32) {
+                       fl_align = 64;
+                       fl_align_log = 6;
+               }
+               t4_set_reg_field(adap, SGE_CONTROL,
+                                INGPADBOUNDARY_MASK |
+                                EGRSTATUSPAGESIZE_MASK,
+                                INGPADBOUNDARY(INGPCIEBOUNDARY_32B_X) |
+                                EGRSTATUSPAGESIZE(stat_len != 64));
+               t4_set_reg_field(adap, SGE_CONTROL2_A,
+                                INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
+                                INGPACKBOUNDARY_V(fl_align_log -
+                                                INGPACKBOUNDARY_SHIFT_X));
+       }
        /*
         * Adjust various SGE Free List Host Buffer Sizes.
         *
index a1024db5dc136bb2a64fe5d3d696580ae3e38434..8d2de1006b084574e78a9095a721cd359b115945 100644 (file)
@@ -95,6 +95,7 @@
 #define X_INGPADBOUNDARY_SHIFT 5
 
 #define SGE_CONTROL 0x1008
+#define SGE_CONTROL2_A         0x1124
 #define  DCASYSTYPE             0x00080000U
 #define  RXPKTCPLMODE_MASK      0x00040000U
 #define  RXPKTCPLMODE_SHIFT     18
 #define  PKTSHIFT_SHIFT         10
 #define  PKTSHIFT(x)            ((x) << PKTSHIFT_SHIFT)
 #define  PKTSHIFT_GET(x)       (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT)
+#define  INGPCIEBOUNDARY_32B_X 0
 #define  INGPCIEBOUNDARY_MASK   0x00000380U
 #define  INGPCIEBOUNDARY_SHIFT  7
 #define  INGPCIEBOUNDARY(x)     ((x) << INGPCIEBOUNDARY_SHIFT)
 #define  INGPADBOUNDARY(x)      ((x) << INGPADBOUNDARY_SHIFT)
 #define  INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \
                                 >> INGPADBOUNDARY_SHIFT)
+#define  INGPACKBOUNDARY_16B_X 0
+#define  INGPACKBOUNDARY_SHIFT_X 5
+
+#define  INGPACKBOUNDARY_S     16
+#define  INGPACKBOUNDARY_M     0x7U
+#define  INGPACKBOUNDARY_V(x)  ((x) << INGPACKBOUNDARY_S)
+#define  INGPACKBOUNDARY_G(x)  (((x) >> INGPACKBOUNDARY_S) \
+                                & INGPACKBOUNDARY_M)
 #define  EGRPCIEBOUNDARY_MASK   0x0000000eU
 #define  EGRPCIEBOUNDARY_SHIFT  1
 #define  EGRPCIEBOUNDARY(x)     ((x) << EGRPCIEBOUNDARY_SHIFT)
index 68eaa9c88c7d8a77646bd217e00877a281254a59..3d06e77d7121510e70c846c5d9a6c0e7526aa736 100644 (file)
@@ -299,6 +299,14 @@ struct sge {
        u16 timer_val[SGE_NTIMERS];     /* interrupt holdoff timer array */
        u8 counter_val[SGE_NCOUNTERS];  /* interrupt RX threshold array */
 
+       /* Decoded Adapter Parameters.
+        */
+       u32 fl_pg_order;                /* large page allocation size */
+       u32 stat_len;                   /* length of status page at ring end */
+       u32 pktshift;                   /* padding between CPL & packet data */
+       u32 fl_align;                   /* response queue message alignment */
+       u32 fl_starve_thres;            /* Free List starvation threshold */
+
        /*
         * Reverse maps from Absolute Queue IDs to associated queue pointers.
         * The absolute Queue IDs are in a compact range which start at a
index 85036e6b42c4cd4f7663ef2487948448d2d04be8..fdd078d7d82c661ec5c63e8198c9ae6d464366c3 100644 (file)
 #include "../cxgb4/t4fw_api.h"
 #include "../cxgb4/t4_msg.h"
 
-/*
- * Decoded Adapter Parameters.
- */
-static u32 FL_PG_ORDER;                /* large page allocation size */
-static u32 STAT_LEN;           /* length of status page at ring end */
-static u32 PKTSHIFT;           /* padding between CPL and packet data */
-static u32 FL_ALIGN;           /* response queue message alignment */
-
 /*
  * Constants ...
  */
@@ -101,12 +93,6 @@ enum {
        TX_QCHECK_PERIOD = (HZ / 2),
        MAX_TIMER_TX_RECLAIM = 100,
 
-       /*
-        * An FL with <= FL_STARVE_THRES buffers is starving and a periodic
-        * timer will attempt to refill it.
-        */
-       FL_STARVE_THRES = 4,
-
        /*
         * Suspend an Ethernet TX queue with fewer available descriptors than
         * this.  We always want to have room for a maximum sized packet:
@@ -264,15 +250,19 @@ static inline unsigned int fl_cap(const struct sge_fl *fl)
 
 /**
  *     fl_starving - return whether a Free List is starving.
+ *     @adapter: pointer to the adapter
  *     @fl: the Free List
  *
  *     Tests specified Free List to see whether the number of buffers
  *     available to the hardware has falled below our "starvation"
  *     threshold.
  */
-static inline bool fl_starving(const struct sge_fl *fl)
+static inline bool fl_starving(const struct adapter *adapter,
+                              const struct sge_fl *fl)
 {
-       return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
+       const struct sge *s = &adapter->sge;
+
+       return fl->avail - fl->pend_cred <= s->fl_starve_thres;
 }
 
 /**
@@ -457,13 +447,16 @@ static inline void reclaim_completed_tx(struct adapter *adapter,
 
 /**
  *     get_buf_size - return the size of an RX Free List buffer.
+ *     @adapter: pointer to the associated adapter
  *     @sdesc: pointer to the software buffer descriptor
  */
-static inline int get_buf_size(const struct rx_sw_desc *sdesc)
+static inline int get_buf_size(const struct adapter *adapter,
+                              const struct rx_sw_desc *sdesc)
 {
-       return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
-               ? (PAGE_SIZE << FL_PG_ORDER)
-               : PAGE_SIZE;
+       const struct sge *s = &adapter->sge;
+
+       return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
+               ? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE);
 }
 
 /**
@@ -483,7 +476,8 @@ static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
 
                if (is_buf_mapped(sdesc))
                        dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
-                                      get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+                                      get_buf_size(adapter, sdesc),
+                                      PCI_DMA_FROMDEVICE);
                put_page(sdesc->page);
                sdesc->page = NULL;
                if (++fl->cidx == fl->size)
@@ -511,7 +505,8 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
 
        if (is_buf_mapped(sdesc))
                dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
-                              get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+                              get_buf_size(adapter, sdesc),
+                              PCI_DMA_FROMDEVICE);
        sdesc->page = NULL;
        if (++fl->cidx == fl->size)
                fl->cidx = 0;
@@ -589,6 +584,7 @@ static inline void poison_buf(struct page *page, size_t sz)
 static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
                              int n, gfp_t gfp)
 {
+       struct sge *s = &adapter->sge;
        struct page *page;
        dma_addr_t dma_addr;
        unsigned int cred = fl->avail;
@@ -608,12 +604,12 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
         * If we don't support large pages, drop directly into the small page
         * allocation code.
         */
-       if (FL_PG_ORDER == 0)
+       if (s->fl_pg_order == 0)
                goto alloc_small_pages;
 
        while (n) {
                page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
-                                  FL_PG_ORDER);
+                                  s->fl_pg_order);
                if (unlikely(!page)) {
                        /*
                         * We've failed inour attempt to allocate a "large
@@ -623,10 +619,10 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
                        fl->large_alloc_failed++;
                        break;
                }
-               poison_buf(page, PAGE_SIZE << FL_PG_ORDER);
+               poison_buf(page, PAGE_SIZE << s->fl_pg_order);
 
                dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
-                                       PAGE_SIZE << FL_PG_ORDER,
+                                       PAGE_SIZE << s->fl_pg_order,
                                        PCI_DMA_FROMDEVICE);
                if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
                        /*
@@ -637,7 +633,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
                         * because DMA mapping resources are typically
                         * critical resources once they become scarse.
                         */
-                       __free_pages(page, FL_PG_ORDER);
+                       __free_pages(page, s->fl_pg_order);
                        goto out;
                }
                dma_addr |= RX_LARGE_BUF;
@@ -693,7 +689,7 @@ out:
        fl->pend_cred += cred;
        ring_fl_db(adapter, fl);
 
-       if (unlikely(fl_starving(fl))) {
+       if (unlikely(fl_starving(adapter, fl))) {
                smp_wmb();
                set_bit(fl->cntxt_id, adapter->sge.starving_fl);
        }
@@ -1468,6 +1464,8 @@ static void t4vf_pktgl_free(const struct pkt_gl *gl)
 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
                   const struct cpl_rx_pkt *pkt)
 {
+       struct adapter *adapter = rxq->rspq.adapter;
+       struct sge *s = &adapter->sge;
        int ret;
        struct sk_buff *skb;
 
@@ -1478,8 +1476,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
                return;
        }
 
-       copy_frags(skb, gl, PKTSHIFT);
-       skb->len = gl->tot_len - PKTSHIFT;
+       copy_frags(skb, gl, s->pktshift);
+       skb->len = gl->tot_len - s->pktshift;
        skb->data_len = skb->len;
        skb->truesize += skb->data_len;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1516,6 +1514,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
        bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
                       (rspq->netdev->features & NETIF_F_RXCSUM);
        struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+       struct adapter *adapter = rspq->adapter;
+       struct sge *s = &adapter->sge;
 
        /*
         * If this is a good TCP packet and we have Generic Receive Offload
@@ -1537,7 +1537,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
                rxq->stats.rx_drops++;
                return 0;
        }
-       __skb_pull(skb, PKTSHIFT);
+       __skb_pull(skb, s->pktshift);
        skb->protocol = eth_type_trans(skb, rspq->netdev);
        skb_record_rx_queue(skb, rspq->idx);
        rxq->stats.pkts++;
@@ -1648,6 +1648,8 @@ static inline void rspq_next(struct sge_rspq *rspq)
 static int process_responses(struct sge_rspq *rspq, int budget)
 {
        struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+       struct adapter *adapter = rspq->adapter;
+       struct sge *s = &adapter->sge;
        int budget_left = budget;
 
        while (likely(budget_left)) {
@@ -1697,7 +1699,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
                                BUG_ON(frag >= MAX_SKB_FRAGS);
                                BUG_ON(rxq->fl.avail == 0);
                                sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
-                               bufsz = get_buf_size(sdesc);
+                               bufsz = get_buf_size(adapter, sdesc);
                                fp->page = sdesc->page;
                                fp->offset = rspq->offset;
                                fp->size = min(bufsz, len);
@@ -1726,7 +1728,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
                         */
                        ret = rspq->handler(rspq, rspq->cur_desc, &gl);
                        if (likely(ret == 0))
-                               rspq->offset += ALIGN(fp->size, FL_ALIGN);
+                               rspq->offset += ALIGN(fp->size, s->fl_align);
                        else
                                restore_rx_bufs(&gl, &rxq->fl, frag);
                } else if (likely(rsp_type == RSP_TYPE_CPL)) {
@@ -1963,7 +1965,7 @@ static void sge_rx_timer_cb(unsigned long data)
                         * schedule napi but the FL is no longer starving.
                         * No biggie.
                         */
-                       if (fl_starving(fl)) {
+                       if (fl_starving(adapter, fl)) {
                                struct sge_eth_rxq *rxq;
 
                                rxq = container_of(fl, struct sge_eth_rxq, fl);
@@ -2047,6 +2049,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
                       int intr_dest,
                       struct sge_fl *fl, rspq_handler_t hnd)
 {
+       struct sge *s = &adapter->sge;
        struct port_info *pi = netdev_priv(dev);
        struct fw_iq_cmd cmd, rpl;
        int ret, iqandst, flsz = 0;
@@ -2117,7 +2120,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
                fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
                fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
                                      sizeof(__be64), sizeof(struct rx_sw_desc),
-                                     &fl->addr, &fl->sdesc, STAT_LEN);
+                                     &fl->addr, &fl->sdesc, s->stat_len);
                if (!fl->desc) {
                        ret = -ENOMEM;
                        goto err;
@@ -2129,7 +2132,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
                 * free list ring) in Egress Queue Units.
                 */
                flsz = (fl->size / FL_PER_EQ_UNIT +
-                       STAT_LEN / EQ_UNIT);
+                       s->stat_len / EQ_UNIT);
 
                /*
                 * Fill in all the relevant firmware Ingress Queue Command
@@ -2217,6 +2220,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
                           struct net_device *dev, struct netdev_queue *devq,
                           unsigned int iqid)
 {
+       struct sge *s = &adapter->sge;
        int ret, nentries;
        struct fw_eq_eth_cmd cmd, rpl;
        struct port_info *pi = netdev_priv(dev);
@@ -2225,7 +2229,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
         * Calculate the size of the hardware TX Queue (including the Status
         * Page on the end of the TX Queue) in units of TX Descriptors.
         */
-       nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+       nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
 
        /*
         * Allocate the hardware ring for the TX ring (with space for its
@@ -2234,7 +2238,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
        txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
                                 sizeof(struct tx_desc),
                                 sizeof(struct tx_sw_desc),
-                                &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
+                                &txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
        if (!txq->q.desc)
                return -ENOMEM;
 
@@ -2307,8 +2311,10 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
  */
 static void free_txq(struct adapter *adapter, struct sge_txq *tq)
 {
+       struct sge *s = &adapter->sge;
+
        dma_free_coherent(adapter->pdev_dev,
-                         tq->size * sizeof(*tq->desc) + STAT_LEN,
+                         tq->size * sizeof(*tq->desc) + s->stat_len,
                          tq->desc, tq->phys_addr);
        tq->cntxt_id = 0;
        tq->sdesc = NULL;
@@ -2322,6 +2328,7 @@ static void free_txq(struct adapter *adapter, struct sge_txq *tq)
 static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
                         struct sge_fl *fl)
 {
+       struct sge *s = &adapter->sge;
        unsigned int flid = fl ? fl->cntxt_id : 0xffff;
 
        t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
@@ -2337,7 +2344,7 @@ static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
        if (fl) {
                free_rx_bufs(adapter, fl, fl->avail);
                dma_free_coherent(adapter->pdev_dev,
-                                 fl->size * sizeof(*fl->desc) + STAT_LEN,
+                                 fl->size * sizeof(*fl->desc) + s->stat_len,
                                  fl->desc, fl->addr);
                kfree(fl->sdesc);
                fl->sdesc = NULL;
@@ -2423,6 +2430,7 @@ int t4vf_sge_init(struct adapter *adapter)
        u32 fl0 = sge_params->sge_fl_buffer_size[0];
        u32 fl1 = sge_params->sge_fl_buffer_size[1];
        struct sge *s = &adapter->sge;
+       unsigned int ingpadboundary, ingpackboundary;
 
        /*
         * Start by vetting the basic SGE parameters which have been set up by
@@ -2443,12 +2451,48 @@ int t4vf_sge_init(struct adapter *adapter)
         * Now translate the adapter parameters into our internal forms.
         */
        if (fl1)
-               FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT;
-       STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
-                   ? 128 : 64);
-       PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control);
-       FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
-                        SGE_INGPADBOUNDARY_SHIFT);
+               s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
+       s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
+                       ? 128 : 64);
+       s->pktshift = PKTSHIFT_GET(sge_params->sge_control);
+
+       /* T4 uses a single control field to specify both the PCIe Padding and
+        * Packing Boundary.  T5 introduced the ability to specify these
+        * separately.  The actual Ingress Packet Data alignment boundary
+        * within Packed Buffer Mode is the maximum of these two
+        * specifications.  (Note that it makes no real practical sense to
+        * have the Padding Boundary be larger than the Packing Boundary but you
+        * could set the chip up that way and, in fact, legacy T4 code would
+        * end up doing this because it would initialize the Padding Boundary and
+        * leave the Packing Boundary initialized to 0 (16 bytes).)
+        */
+       ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
+                              X_INGPADBOUNDARY_SHIFT);
+       if (is_t4(adapter->params.chip)) {
+               s->fl_align = ingpadboundary;
+       } else {
+               /* T5 has a different interpretation of one of the PCIe Packing
+                * Boundary values.
+                */
+               ingpackboundary = INGPACKBOUNDARY_G(sge_params->sge_control2);
+               if (ingpackboundary == INGPACKBOUNDARY_16B_X)
+                       ingpackboundary = 16;
+               else
+                       ingpackboundary = 1 << (ingpackboundary +
+                                               INGPACKBOUNDARY_SHIFT_X);
+
+               s->fl_align = max(ingpadboundary, ingpackboundary);
+       }
+
+       /* A FL with <= fl_starve_thres buffers is starving and a periodic
+        * timer will attempt to refill it.  This needs to be larger than the
+        * SGE's Egress Congestion Threshold.  If it isn't, then we can get
+        * stuck waiting for new packets while the SGE is waiting for us to
+        * give it more Free List entries.  (Note that the SGE's Egress
+        * Congestion Threshold is in units of 2 Free List pointers.)
+        */
+       s->fl_starve_thres
+               = EGRTHRESHOLD_GET(sge_params->sge_congestion_control)*2 + 1;
 
        /*
         * Set up tasklet timers.
index 95df61dcb4ce57021237a15013bfdc8e88c0fa5d..4b6a6d14d86d98df42bea1de5a6a507d9820374e 100644 (file)
@@ -134,11 +134,13 @@ struct dev_params {
  */
 struct sge_params {
        u32 sge_control;                /* padding, boundaries, lengths, etc. */
+       u32 sge_control2;               /* T5: more of the same */
        u32 sge_host_page_size;         /* RDMA page sizes */
        u32 sge_queues_per_page;        /* RDMA queues/page */
        u32 sge_user_mode_limits;       /* limits for BAR2 user mode accesses */
        u32 sge_fl_buffer_size[16];     /* free list buffer sizes */
        u32 sge_ingress_rx_threshold;   /* RX counter interrupt threshold[4] */
+       u32 sge_congestion_control;     /* congestion thresholds, etc. */
        u32 sge_timer_value_0_and_1;    /* interrupt coalescing timer values */
        u32 sge_timer_value_2_and_3;
        u32 sge_timer_value_4_and_5;
index e984fdc48ba2e326a738e6f5bfa14e1bfda73f02..1e896b923234f6d1b2e21da52fd73b010d45f214 100644 (file)
@@ -468,12 +468,38 @@ int t4vf_get_sge_params(struct adapter *adapter)
        sge_params->sge_timer_value_2_and_3 = vals[5];
        sge_params->sge_timer_value_4_and_5 = vals[6];
 
+       /* T4 uses a single control field to specify both the PCIe Padding and
+        * Packing Boundary.  T5 introduced the ability to specify these
+        * separately with the Padding Boundary in SGE_CONTROL and the Packing
+        * Boundary in SGE_CONTROL2.  So for T5 and later we need to grab
+        * SGE_CONTROL in order to determine how ingress packet data will be
+        * laid out in Packed Buffer Mode.  Unfortunately, older versions of
+        * the firmware won't let us retrieve SGE_CONTROL2 so if we get a
+        * failure grabbing it we throw an error since we can't figure out the
+        * right value.
+        */
+       if (!is_t4(adapter->params.chip)) {
+               params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+                            FW_PARAMS_PARAM_XYZ(SGE_CONTROL2_A));
+               v = t4vf_query_params(adapter, 1, params, vals);
+               if (v != FW_SUCCESS) {
+                       dev_err(adapter->pdev_dev,
+                               "Unable to get SGE Control2; "
+                               "probably old firmware.\n");
+                       return v;
+               }
+               sge_params->sge_control2 = vals[0];
+       }
+
        params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                     FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD));
-       v = t4vf_query_params(adapter, 1, params, vals);
+       params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+                    FW_PARAMS_PARAM_XYZ(SGE_CONM_CTRL));
+       v = t4vf_query_params(adapter, 2, params, vals);
        if (v)
                return v;
        sge_params->sge_ingress_rx_threshold = vals[0];
+       sge_params->sge_congestion_control = vals[1];
 
        return 0;
 }
index 180e53fa628face1c64b5306afdf5c95aaca89a6..73cf1653a4a3d4225951334e26506e89145cd1e4 100644 (file)
@@ -940,18 +940,8 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
        struct vnic_rq_buf *buf = rq->to_use;
 
        if (buf->os_buf) {
-               buf = buf->next;
-               rq->to_use = buf;
-               rq->ring.desc_avail--;
-               if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
-                       /* Adding write memory barrier prevents compiler and/or
-                        * CPU reordering, thus avoiding descriptor posting
-                        * before descriptor is initialized. Otherwise, hardware
-                        * can read stale descriptor fields.
-                        */
-                       wmb();
-                       iowrite32(buf->index, &rq->ctrl->posted_index);
-               }
+               enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
+                                  buf->len);
 
                return 0;
        }
@@ -1037,7 +1027,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
                                enic->rq_truncated_pkts++;
                }
 
+               pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
+                                PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(skb);
+               buf->os_buf = NULL;
 
                return;
        }
@@ -1088,7 +1081,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
                /* Buffer overflow
                 */
 
+               pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
+                                PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(skb);
+               buf->os_buf = NULL;
        }
 }
 
index 50a851db2852e28979a0b78f481347fc9e29778a..3dca494797bd2ebb8a7dd6c1983b7f19e72e481c 100644 (file)
@@ -298,6 +298,16 @@ static void *swap_buffer(void *bufaddr, int len)
        return bufaddr;
 }
 
+static void swap_buffer2(void *dst_buf, void *src_buf, int len)
+{
+       int i;
+       unsigned int *src = src_buf;
+       unsigned int *dst = dst_buf;
+
+       for (i = 0; i < len; i += 4, src++, dst++)
+               *dst = swab32p(src);
+}
+
 static void fec_dump(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
@@ -1307,7 +1317,7 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
 }
 
 static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
-                              struct bufdesc *bdp, u32 length)
+                              struct bufdesc *bdp, u32 length, bool swap)
 {
        struct  fec_enet_private *fep = netdev_priv(ndev);
        struct sk_buff *new_skb;
@@ -1322,7 +1332,10 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
        dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
                                FEC_ENET_RX_FRSIZE - fep->rx_align,
                                DMA_FROM_DEVICE);
-       memcpy(new_skb->data, (*skb)->data, length);
+       if (!swap)
+               memcpy(new_skb->data, (*skb)->data, length);
+       else
+               swap_buffer2(new_skb->data, (*skb)->data, length);
        *skb = new_skb;
 
        return true;
@@ -1352,6 +1365,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
        u16     vlan_tag;
        int     index = 0;
        bool    is_copybreak;
+       bool    need_swap = id_entry->driver_data & FEC_QUIRK_SWAP_FRAME;
 
 #ifdef CONFIG_M532x
        flush_cache_all();
@@ -1415,7 +1429,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
                 * include that when passing upstream as it messes up
                 * bridging applications.
                 */
-               is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4);
+               is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
+                                                 need_swap);
                if (!is_copybreak) {
                        skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
                        if (unlikely(!skb_new)) {
@@ -1430,7 +1445,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
                prefetch(skb->data - NET_IP_ALIGN);
                skb_put(skb, pkt_len - 4);
                data = skb->data;
-               if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+               if (!is_copybreak && need_swap)
                        swap_buffer(data, pkt_len);
 
                /* Extract the enhanced buffer descriptor */
@@ -3343,12 +3358,11 @@ static int __maybe_unused fec_suspend(struct device *dev)
                netif_device_detach(ndev);
                netif_tx_unlock_bh(ndev);
                fec_stop(ndev);
+               fec_enet_clk_enable(ndev, false);
+               pinctrl_pm_select_sleep_state(&fep->pdev->dev);
        }
        rtnl_unlock();
 
-       fec_enet_clk_enable(ndev, false);
-       pinctrl_pm_select_sleep_state(&fep->pdev->dev);
-
        if (fep->reg_phy)
                regulator_disable(fep->reg_phy);
 
@@ -3367,13 +3381,14 @@ static int __maybe_unused fec_resume(struct device *dev)
                        return ret;
        }
 
-       pinctrl_pm_select_default_state(&fep->pdev->dev);
-       ret = fec_enet_clk_enable(ndev, true);
-       if (ret)
-               goto failed_clk;
-
        rtnl_lock();
        if (netif_running(ndev)) {
+               pinctrl_pm_select_default_state(&fep->pdev->dev);
+               ret = fec_enet_clk_enable(ndev, true);
+               if (ret) {
+                       rtnl_unlock();
+                       goto failed_clk;
+               }
                fec_restart(ndev);
                netif_tx_lock_bh(ndev);
                netif_device_attach(ndev);
index d47b19f27c355c96ea006c004afa38efac3dcfe7..28b81ae09b5ae2288e8d05fac82176a8d504fd18 100644 (file)
@@ -635,7 +635,6 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
  **/
 s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
 {
-       s32 status;
        u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
        bool autoneg = false;
        ixgbe_link_speed speed;
@@ -700,8 +699,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
 
        hw->phy.ops.write_reg(hw, MDIO_CTRL1,
                              MDIO_MMD_AN, autoneg_reg);
-
-       return status;
+       return 0;
 }
 
 /**
index b151a949f352a20ec8e74b4f3a7b6bb194ce841c..d44560d1d268caae42143b674535db45c2784289 100644 (file)
@@ -1047,7 +1047,6 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
                int tx_index;
                struct tx_desc *desc;
                u32 cmd_sts;
-               struct sk_buff *skb;
 
                tx_index = txq->tx_used_desc;
                desc = &txq->tx_desc_area[tx_index];
@@ -1066,19 +1065,22 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
                reclaimed++;
                txq->tx_desc_count--;
 
-               skb = NULL;
-               if (cmd_sts & TX_LAST_DESC)
-                       skb = __skb_dequeue(&txq->tx_skb);
+               if (!IS_TSO_HEADER(txq, desc->buf_ptr))
+                       dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
+                                        desc->byte_cnt, DMA_TO_DEVICE);
+
+               if (cmd_sts & TX_ENABLE_INTERRUPT) {
+                       struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
+
+                       if (!WARN_ON(!skb))
+                               dev_kfree_skb(skb);
+               }
 
                if (cmd_sts & ERROR_SUMMARY) {
                        netdev_info(mp->dev, "tx error\n");
                        mp->dev->stats.tx_errors++;
                }
 
-               if (!IS_TSO_HEADER(txq, desc->buf_ptr))
-                       dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
-                                        desc->byte_cnt, DMA_TO_DEVICE);
-               dev_kfree_skb(skb);
        }
 
        __netif_tx_unlock_bh(nq);
index ece83f101526de02fac0af5ccf0f85c88d440670..fdf3e382e4649313b677fa8c164e1d2430130f3b 100644 (file)
@@ -1692,6 +1692,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
 {
        struct mvpp2_prs_entry *pe;
        int tid_aux, tid;
+       int ret = 0;
 
        pe = mvpp2_prs_vlan_find(priv, tpid, ai);
 
@@ -1723,8 +1724,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
                                break;
                }
 
-               if (tid <= tid_aux)
-                       return -EINVAL;
+               if (tid <= tid_aux) {
+                       ret = -EINVAL;
+                       goto error;
+               }
 
                memset(pe, 0 , sizeof(struct mvpp2_prs_entry));
                mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
@@ -1756,9 +1759,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
 
        mvpp2_prs_hw_write(priv, pe);
 
+error:
        kfree(pe);
 
-       return 0;
+       return ret;
 }
 
 /* Get first free double vlan ai number */
@@ -1821,7 +1825,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
                                     unsigned int port_map)
 {
        struct mvpp2_prs_entry *pe;
-       int tid_aux, tid, ai;
+       int tid_aux, tid, ai, ret = 0;
 
        pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
 
@@ -1838,8 +1842,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
 
                /* Set ai value for new double vlan entry */
                ai = mvpp2_prs_double_vlan_ai_free_get(priv);
-               if (ai < 0)
-                       return ai;
+               if (ai < 0) {
+                       ret = ai;
+                       goto error;
+               }
 
                /* Get first single/triple vlan tid */
                for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
@@ -1859,8 +1865,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
                                break;
                }
 
-               if (tid >= tid_aux)
-                       return -ERANGE;
+               if (tid >= tid_aux) {
+                       ret = -ERANGE;
+                       goto error;
+               }
 
                memset(pe, 0, sizeof(struct mvpp2_prs_entry));
                mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
@@ -1887,8 +1895,9 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
        mvpp2_prs_tcam_port_map_set(pe, port_map);
        mvpp2_prs_hw_write(priv, pe);
 
+error:
        kfree(pe);
-       return 0;
+       return ret;
 }
 
 /* IPv4 header parsing for fragmentation and L4 offset */
index f3032fec8fce03767580bd555a4760e8d7faba1e..02266e3de514f21c263ea91e9d3555475dc95099 100644 (file)
@@ -2281,8 +2281,16 @@ static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
        ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
                                  VXLAN_STEER_BY_OUTER_MAC, 1);
 out:
-       if (ret)
+       if (ret) {
                en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
+               return;
+       }
+
+       /* set offloads */
+       priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+                                     NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
+       priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+       priv->dev->features    |= NETIF_F_GSO_UDP_TUNNEL;
 }
 
 static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2290,6 +2298,11 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
        int ret;
        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                                 vxlan_del_task);
+       /* unset offloads */
+       priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+                                     NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
+       priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
+       priv->dev->features    &= ~NETIF_F_GSO_UDP_TUNNEL;
 
        ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
                                  VXLAN_STEER_BY_OUTER_MAC, 0);
@@ -2568,13 +2581,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
                dev->priv_flags |= IFF_UNICAST_FLT;
 
-       if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
-               dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
-                                       NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
-               dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
-               dev->features    |= NETIF_F_GSO_UDP_TUNNEL;
-       }
-
        mdev->pndev[port] = dev;
 
        netif_carrier_off(dev);
index a278238a2db643ba04b924dd9920a284abcd8908..ad2c96a02a5352caec2d8649c714c3616be98ea8 100644 (file)
@@ -374,15 +374,14 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
        snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
                 name, pci_name(dev->pdev));
        eq->eqn = out.eq_number;
+       eq->irqn = vecidx;
+       eq->dev = dev;
+       eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
        err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
                          eq->name, eq);
        if (err)
                goto err_eq;
 
-       eq->irqn = vecidx;
-       eq->dev = dev;
-       eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
-
        err = mlx5_debug_eq_add(dev, eq);
        if (err)
                goto err_irq;
index 3d8e8e489b2ddb3539a4a9c89b784974c833e50e..71b10b210792e316582d0280d87f92cbee6bb2d8 100644 (file)
@@ -864,14 +864,14 @@ static int init_one(struct pci_dev *pdev,
        dev->profile = &profile[prof_sel];
        dev->event = mlx5_core_event;
 
+       INIT_LIST_HEAD(&priv->ctx_list);
+       spin_lock_init(&priv->ctx_lock);
        err = mlx5_dev_init(dev, pdev);
        if (err) {
                dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err);
                goto out;
        }
 
-       INIT_LIST_HEAD(&priv->ctx_list);
-       spin_lock_init(&priv->ctx_lock);
        err = mlx5_register_device(dev);
        if (err) {
                dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
index 0b2a1ccd276dbd4c1e582ef635b6304c351d03d7..613037584d08e785ef2700ca1d2221b50b256e9c 100644 (file)
@@ -2762,7 +2762,8 @@ netxen_fw_poll_work(struct work_struct *work)
        if (test_bit(__NX_RESETTING, &adapter->state))
                goto reschedule;
 
-       if (test_bit(__NX_DEV_UP, &adapter->state)) {
+       if (test_bit(__NX_DEV_UP, &adapter->state) &&
+           !(adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)) {
                if (!adapter->has_link_events) {
 
                        netxen_nic_handle_phy_intr(adapter);
index f3a47147937d46b3a7aa5955b6e9729d17597a99..9a49f42ac2ba4a76e743f1c400f9f4bd69850a6a 100644 (file)
@@ -5,7 +5,6 @@
 config NET_VENDOR_QUALCOMM
        bool "Qualcomm devices"
        default y
-       depends on SPI_MASTER && OF_GPIO
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y
          and read the Ethernet-HOWTO, available from
@@ -20,7 +19,7 @@ if NET_VENDOR_QUALCOMM
 
 config QCA7000
        tristate "Qualcomm Atheros QCA7000 support"
-       depends on SPI_MASTER && OF_GPIO
+       depends on SPI_MASTER && OF
        ---help---
          This SPI protocol driver supports the Qualcomm Atheros QCA7000.
 
index 002d4cdc319fda80a064c7cea199e1bf417d479c..a77f05ce832596d0c8eb10ef3a9735f48ba64b55 100644 (file)
@@ -180,7 +180,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
                      EFX_MAX_CHANNELS,
                      resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
                      (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
-       BUG_ON(efx->max_channels == 0);
+       if (WARN_ON(efx->max_channels == 0))
+               return -EIO;
 
        nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
        if (!nic_data)
index 2c62208077fe7a442aa1b5340e0e614d3b247c21..6cc3cf6f17c846cdcf900d47f17b40749dfb5966 100644 (file)
@@ -2243,9 +2243,10 @@ static int smc_drv_probe(struct platform_device *pdev)
        const struct of_device_id *match = NULL;
        struct smc_local *lp;
        struct net_device *ndev;
-       struct resource *res, *ires;
+       struct resource *res;
        unsigned int __iomem *addr;
        unsigned long irq_flags = SMC_IRQ_FLAGS;
+       unsigned long irq_resflags;
        int ret;
 
        ndev = alloc_etherdev(sizeof(struct smc_local));
@@ -2337,16 +2338,19 @@ static int smc_drv_probe(struct platform_device *pdev)
                goto out_free_netdev;
        }
 
-       ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (!ires) {
+       ndev->irq = platform_get_irq(pdev, 0);
+       if (ndev->irq <= 0) {
                ret = -ENODEV;
                goto out_release_io;
        }
-
-       ndev->irq = ires->start;
-
-       if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK)
-               irq_flags = ires->flags & IRQF_TRIGGER_MASK;
+       /*
+        * If this platform does not specify any special irqflags, or if
+        * the resource supplies a trigger, override the irqflags with
+        * the trigger flags from the resource.
+        */
+       irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq));
+       if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK)
+               irq_flags = irq_resflags & IRQF_TRIGGER_MASK;
 
        ret = smc_request_attrib(pdev, ndev);
        if (ret)
index affb29da353e29139fb758c5a5ce68b701479815..77ed74561e5fe815de9c7efdd1477404d3591b81 100644 (file)
@@ -1342,6 +1342,42 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
        spin_unlock(&pdata->mac_lock);
 }
 
+static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata)
+{
+       int rc = 0;
+
+       if (!pdata->phy_dev)
+               return rc;
+
+       /* If the internal PHY is in General Power-Down mode, all, except the
+        * management interface, is powered-down and stays in that condition as
+        * long as Phy register bit 0.11 is HIGH.
+        *
+        * In that case, clear the bit 0.11, so the PHY powers up and we can
+        * access the phy registers.
+        */
+       rc = phy_read(pdata->phy_dev, MII_BMCR);
+       if (rc < 0) {
+               SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
+               return rc;
+       }
+
+       /* If the PHY general power-down bit is not set, it is not necessary to
+        * disable the general power-down mode.
+        */
+       if (rc & BMCR_PDOWN) {
+               rc = phy_write(pdata->phy_dev, MII_BMCR, rc & ~BMCR_PDOWN);
+               if (rc < 0) {
+                       SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
+                       return rc;
+               }
+
+               usleep_range(1000, 1500);
+       }
+
+       return 0;
+}
+
 static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
 {
        int rc = 0;
@@ -1356,12 +1392,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
                return rc;
        }
 
-       /*
-        * If energy is detected the PHY is already awake so is not necessary
-        * to disable the energy detect power-down mode.
-        */
-       if ((rc & MII_LAN83C185_EDPWRDOWN) &&
-           !(rc & MII_LAN83C185_ENERGYON)) {
+       /* Only disable if energy detect mode is already enabled */
+       if (rc & MII_LAN83C185_EDPWRDOWN) {
                /* Disable energy detect mode for this SMSC Transceivers */
                rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
                               rc & (~MII_LAN83C185_EDPWRDOWN));
@@ -1370,8 +1402,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
                        SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
                        return rc;
                }
-
-               mdelay(1);
+               /* Allow PHY to wakeup */
+               mdelay(2);
        }
 
        return 0;
@@ -1393,7 +1425,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
 
        /* Only enable if energy detect mode is already disabled */
        if (!(rc & MII_LAN83C185_EDPWRDOWN)) {
-               mdelay(100);
                /* Enable energy detect mode for this SMSC Transceivers */
                rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
                               rc | MII_LAN83C185_EDPWRDOWN);
@@ -1402,8 +1433,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
                        SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
                        return rc;
                }
-
-               mdelay(1);
        }
        return 0;
 }
@@ -1414,6 +1443,16 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata)
        unsigned int temp;
        int ret;
 
+       /*
+        * Make sure to power-up the PHY chip before doing a reset, otherwise
+        * the reset fails.
+        */
+       ret = smsc911x_phy_general_power_up(pdata);
+       if (ret) {
+               SMSC_WARN(pdata, drv, "Failed to power-up the PHY chip");
+               return ret;
+       }
+
        /*
         * LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that
         * are initialized in a Energy Detect Power-Down mode that prevents
index 6f77a46c7e2c446da3b3b4ae139057a621ef00ef..18c46bb0f3bfa296096ab2ec6624a4bd27bd304f 100644 (file)
@@ -276,6 +276,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
 bool stmmac_eee_init(struct stmmac_priv *priv)
 {
        char *phy_bus_name = priv->plat->phy_bus_name;
+       unsigned long flags;
        bool ret = false;
 
        /* Using PCS we cannot dial with the phy registers at this stage
@@ -300,6 +301,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
                         * changed).
                         * In that case the driver disable own timers.
                         */
+                       spin_lock_irqsave(&priv->lock, flags);
                        if (priv->eee_active) {
                                pr_debug("stmmac: disable EEE\n");
                                del_timer_sync(&priv->eee_ctrl_timer);
@@ -307,9 +309,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
                                                             tx_lpi_timer);
                        }
                        priv->eee_active = 0;
+                       spin_unlock_irqrestore(&priv->lock, flags);
                        goto out;
                }
                /* Activate the EEE and start timers */
+               spin_lock_irqsave(&priv->lock, flags);
                if (!priv->eee_active) {
                        priv->eee_active = 1;
                        init_timer(&priv->eee_ctrl_timer);
@@ -325,9 +329,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
                /* Set HW EEE according to the speed */
                priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);
 
-               pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
-
                ret = true;
+               spin_unlock_irqrestore(&priv->lock, flags);
+
+               pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
        }
 out:
        return ret;
@@ -760,12 +765,12 @@ static void stmmac_adjust_link(struct net_device *dev)
        if (new_state && netif_msg_link(priv))
                phy_print_status(phydev);
 
+       spin_unlock_irqrestore(&priv->lock, flags);
+
        /* At this stage, it could be needed to setup the EEE or adjust some
         * MAC related HW registers.
         */
        priv->eee_enabled = stmmac_eee_init(priv);
-
-       spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 /**
@@ -959,12 +964,12 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
 }
 
 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
-                                 int i)
+                                 int i, gfp_t flags)
 {
        struct sk_buff *skb;
 
        skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
-                                GFP_KERNEL);
+                                flags);
        if (!skb) {
                pr_err("%s: Rx init fails; skb is NULL\n", __func__);
                return -ENOMEM;
@@ -1006,7 +1011,7 @@ static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
  * and allocates the socket buffers. It suppors the chained and ring
  * modes.
  */
-static int init_dma_desc_rings(struct net_device *dev)
+static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
 {
        int i;
        struct stmmac_priv *priv = netdev_priv(dev);
@@ -1041,7 +1046,7 @@ static int init_dma_desc_rings(struct net_device *dev)
                else
                        p = priv->dma_rx + i;
 
-               ret = stmmac_init_rx_buffers(priv, p, i);
+               ret = stmmac_init_rx_buffers(priv, p, i, flags);
                if (ret)
                        goto err_init_rx_buffers;
 
@@ -1647,11 +1652,6 @@ static int stmmac_hw_setup(struct net_device *dev)
        struct stmmac_priv *priv = netdev_priv(dev);
        int ret;
 
-       ret = init_dma_desc_rings(dev);
-       if (ret < 0) {
-               pr_err("%s: DMA descriptors initialization failed\n", __func__);
-               return ret;
-       }
        /* DMA initialization and SW reset */
        ret = stmmac_init_dma_engine(priv);
        if (ret < 0) {
@@ -1705,10 +1705,6 @@ static int stmmac_hw_setup(struct net_device *dev)
        }
        priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
 
-       priv->eee_enabled = stmmac_eee_init(priv);
-
-       stmmac_init_tx_coalesce(priv);
-
        if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
                priv->rx_riwt = MAX_DMA_RIWT;
                priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
@@ -1761,12 +1757,20 @@ static int stmmac_open(struct net_device *dev)
                goto dma_desc_error;
        }
 
+       ret = init_dma_desc_rings(dev, GFP_KERNEL);
+       if (ret < 0) {
+               pr_err("%s: DMA descriptors initialization failed\n", __func__);
+               goto init_error;
+       }
+
        ret = stmmac_hw_setup(dev);
        if (ret < 0) {
                pr_err("%s: Hw setup failed\n", __func__);
                goto init_error;
        }
 
+       stmmac_init_tx_coalesce(priv);
+
        if (priv->phydev)
                phy_start(priv->phydev);
 
@@ -1894,7 +1898,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
        unsigned int nopaged_len = skb_headlen(skb);
        unsigned int enh_desc = priv->plat->enh_desc;
 
+       spin_lock(&priv->tx_lock);
+
        if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
+               spin_unlock(&priv->tx_lock);
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);
                        /* This is a hard error, log it. */
@@ -1903,8 +1910,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_BUSY;
        }
 
-       spin_lock(&priv->tx_lock);
-
        if (priv->tx_path_in_lpi_mode)
                stmmac_disable_eee_mode(priv);
 
@@ -2025,6 +2030,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 
 dma_map_err:
+       spin_unlock(&priv->tx_lock);
        dev_err(priv->device, "Tx dma map failed\n");
        dev_kfree_skb(skb);
        priv->dev->stats.tx_dropped++;
@@ -2281,9 +2287,7 @@ static void stmmac_set_rx_mode(struct net_device *dev)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
 
-       spin_lock(&priv->lock);
        priv->hw->mac->set_filter(priv->hw, dev);
-       spin_unlock(&priv->lock);
 }
 
 /**
@@ -2950,7 +2954,7 @@ int stmmac_suspend(struct net_device *ndev)
                stmmac_set_mac(priv->ioaddr, false);
                pinctrl_pm_select_sleep_state(priv->device);
                /* Disable clock in case of PWM is off */
-               clk_disable_unprepare(priv->stmmac_clk);
+               clk_disable(priv->stmmac_clk);
        }
        spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -2982,7 +2986,7 @@ int stmmac_resume(struct net_device *ndev)
        } else {
                pinctrl_pm_select_default_state(priv->device);
                /* enable the clk prevously disabled */
-               clk_prepare_enable(priv->stmmac_clk);
+               clk_enable(priv->stmmac_clk);
                /* reset the phy so that it's ready */
                if (priv->mii)
                        stmmac_mdio_reset(priv->mii);
@@ -2990,7 +2994,9 @@ int stmmac_resume(struct net_device *ndev)
 
        netif_device_attach(ndev);
 
+       init_dma_desc_rings(ndev, GFP_ATOMIC);
        stmmac_hw_setup(ndev);
+       stmmac_init_tx_coalesce(priv);
 
        napi_enable(&priv->napi);
 
index 72c8525d5457b40af75c5c15d6b4bd1432185543..9c014803b03b2338ad2f97fd31794966cfc02b66 100644 (file)
@@ -1262,6 +1262,7 @@ static void happy_meal_init_rings(struct happy_meal *hp)
        HMD(("init rxring, "));
        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb;
+               u32 mapping;
 
                skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
                if (!skb) {
@@ -1272,10 +1273,16 @@ static void happy_meal_init_rings(struct happy_meal *hp)
 
                /* Because we reserve afterwards. */
                skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
+               mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
+                                        DMA_FROM_DEVICE);
+               if (dma_mapping_error(hp->dma_dev, mapping)) {
+                       dev_kfree_skb_any(skb);
+                       hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
+                       continue;
+               }
                hme_write_rxd(hp, &hb->happy_meal_rxd[i],
                              (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
-                             dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
-                                            DMA_FROM_DEVICE));
+                             mapping);
                skb_reserve(skb, RX_OFFSET);
        }
 
@@ -2020,6 +2027,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
                skb = hp->rx_skbs[elem];
                if (len > RX_COPY_THRESHOLD) {
                        struct sk_buff *new_skb;
+                       u32 mapping;
 
                        /* Now refill the entry, if we can. */
                        new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
@@ -2027,13 +2035,21 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
                                drops++;
                                goto drop_it;
                        }
+                       skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
+                       mapping = dma_map_single(hp->dma_dev, new_skb->data,
+                                                RX_BUF_ALLOC_SIZE,
+                                                DMA_FROM_DEVICE);
+                       if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
+                               dev_kfree_skb_any(new_skb);
+                               drops++;
+                               goto drop_it;
+                       }
+
                        dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
                        hp->rx_skbs[elem] = new_skb;
-                       skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
                        hme_write_rxd(hp, this,
                                      (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
-                                     dma_map_single(hp->dma_dev, new_skb->data, RX_BUF_ALLOC_SIZE,
-                                                    DMA_FROM_DEVICE));
+                                     mapping);
                        skb_reserve(new_skb, RX_OFFSET);
 
                        /* Trim the original skb for the netif. */
@@ -2248,6 +2264,25 @@ static void happy_meal_tx_timeout(struct net_device *dev)
        netif_wake_queue(dev);
 }
 
+static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping,
+                                u32 first_len, u32 first_entry, u32 entry)
+{
+       struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
+
+       dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE);
+
+       for (first_entry = NEXT_TX(first_entry); first_entry != entry;
+            first_entry = NEXT_TX(first_entry)) {
+               struct happy_meal_txd *this = &txbase[first_entry];
+               u32 addr, len;
+
+               addr = hme_read_desc32(hp, &this->tx_addr);
+               len = hme_read_desc32(hp, &this->tx_flags);
+               len &= TXFLAG_SIZE;
+               dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE);
+       }
+}
+
 static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
                                         struct net_device *dev)
 {
@@ -2284,6 +2319,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
 
                len = skb->len;
                mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(hp->dma_dev, mapping)))
+                       goto out_dma_error;
                tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
                hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
                              (tx_flags | (len & TXFLAG_SIZE)),
@@ -2299,6 +2336,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
                first_len = skb_headlen(skb);
                first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
                                               DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping)))
+                       goto out_dma_error;
                entry = NEXT_TX(entry);
 
                for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -2308,6 +2347,11 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
                        len = skb_frag_size(this_frag);
                        mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
                                                   0, len, DMA_TO_DEVICE);
+                       if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
+                               unmap_partial_tx_skb(hp, first_mapping, first_len,
+                                                    first_entry, entry);
+                               goto out_dma_error;
+                       }
                        this_txflags = tx_flags;
                        if (frag == skb_shinfo(skb)->nr_frags - 1)
                                this_txflags |= TXFLAG_EOP;
@@ -2333,6 +2377,14 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
 
        tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
        return NETDEV_TX_OK;
+
+out_dma_error:
+       hp->tx_skbs[hp->tx_new] = NULL;
+       spin_unlock_irq(&hp->happy_lock);
+
+       dev_kfree_skb_any(skb);
+       dev->stats.tx_dropped++;
+       return NETDEV_TX_OK;
 }
 
 static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
index 3ae83879a75f5eeb0c44cafcf362a661f1cb86ac..097ebe7077ac0c8de51e3eb7e8da5809f5e6bcea 100644 (file)
@@ -785,7 +785,6 @@ int cpsw_ale_destroy(struct cpsw_ale *ale)
 {
        if (!ale)
                return -EINVAL;
-       cpsw_ale_stop(ale);
        cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
        kfree(ale);
        return 0;
index ab92f67da035f2f5f9aaa8ea4effa87ca83a2a41..4a4388b813ac6b2392917da20f873ceb34297e39 100644 (file)
@@ -264,7 +264,7 @@ static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
 
        switch (ptp_class & PTP_CLASS_PMASK) {
        case PTP_CLASS_IPV4:
-               offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
+               offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
                break;
        case PTP_CLASS_IPV6:
                offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
index 6f226de655a40759874356c38352b928d1e425f3..880cc090dc44ecb0167682e951bedff7753aef94 100644 (file)
@@ -629,6 +629,8 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                vnet_hdr->csum_start = skb_checksum_start_offset(skb);
+               if (vlan_tx_tag_present(skb))
+                       vnet_hdr->csum_start += VLAN_HLEN;
                vnet_hdr->csum_offset = skb->csum_offset;
        } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
                vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
index 2954052706e8bcbf0979e89772326656ba6c57fa..e22e602beef3426a600db641a66237f40c039732 100644 (file)
@@ -791,7 +791,7 @@ static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
 
        switch (type & PTP_CLASS_PMASK) {
        case PTP_CLASS_IPV4:
-               offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
+               offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
                break;
        case PTP_CLASS_IPV6:
                offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
@@ -934,7 +934,7 @@ static int is_sync(struct sk_buff *skb, int type)
 
        switch (type & PTP_CLASS_PMASK) {
        case PTP_CLASS_IPV4:
-               offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
+               offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
                break;
        case PTP_CLASS_IPV6:
                offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
index 1dfffdc9dfc3577bd6893688da191c38539e243f..767cd110f49688d2b4118ba4dffc1a373972e56d 100644 (file)
@@ -352,6 +352,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
 {
        struct mii_ioctl_data *mii_data = if_mii(ifr);
        u16 val = mii_data->val_in;
+       bool change_autoneg = false;
 
        switch (cmd) {
        case SIOCGMIIPHY:
@@ -367,22 +368,29 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
                if (mii_data->phy_id == phydev->addr) {
                        switch (mii_data->reg_num) {
                        case MII_BMCR:
-                               if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0)
+                               if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) {
+                                       if (phydev->autoneg == AUTONEG_ENABLE)
+                                               change_autoneg = true;
                                        phydev->autoneg = AUTONEG_DISABLE;
-                               else
+                                       if (val & BMCR_FULLDPLX)
+                                               phydev->duplex = DUPLEX_FULL;
+                                       else
+                                               phydev->duplex = DUPLEX_HALF;
+                                       if (val & BMCR_SPEED1000)
+                                               phydev->speed = SPEED_1000;
+                                       else if (val & BMCR_SPEED100)
+                                               phydev->speed = SPEED_100;
+                                       else phydev->speed = SPEED_10;
+                               }
+                               else {
+                                       if (phydev->autoneg == AUTONEG_DISABLE)
+                                               change_autoneg = true;
                                        phydev->autoneg = AUTONEG_ENABLE;
-                               if (!phydev->autoneg && (val & BMCR_FULLDPLX))
-                                       phydev->duplex = DUPLEX_FULL;
-                               else
-                                       phydev->duplex = DUPLEX_HALF;
-                               if (!phydev->autoneg && (val & BMCR_SPEED1000))
-                                       phydev->speed = SPEED_1000;
-                               else if (!phydev->autoneg &&
-                                        (val & BMCR_SPEED100))
-                                       phydev->speed = SPEED_100;
+                               }
                                break;
                        case MII_ADVERTISE:
-                               phydev->advertising = val;
+                               phydev->advertising = mii_adv_to_ethtool_adv_t(val);
+                               change_autoneg = true;
                                break;
                        default:
                                /* do nothing */
@@ -396,6 +404,10 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
                if (mii_data->reg_num == MII_BMCR &&
                    val & BMCR_RESET)
                        return phy_init_hw(phydev);
+
+               if (change_autoneg)
+                       return phy_start_aneg(phydev);
+
                return 0;
 
        case SIOCSHWTSTAMP:
index 68c3a3f4e0abe54910923aca8aa64286366e131a..794a4732936883c662212e0769e62993e67d96b0 100644 (file)
@@ -755,23 +755,23 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
                err = get_filter(argp, &code);
                if (err >= 0) {
+                       struct bpf_prog *pass_filter = NULL;
                        struct sock_fprog_kern fprog = {
                                .len = err,
                                .filter = code,
                        };
 
-                       ppp_lock(ppp);
-                       if (ppp->pass_filter) {
-                               bpf_prog_destroy(ppp->pass_filter);
-                               ppp->pass_filter = NULL;
+                       err = 0;
+                       if (fprog.filter)
+                               err = bpf_prog_create(&pass_filter, &fprog);
+                       if (!err) {
+                               ppp_lock(ppp);
+                               if (ppp->pass_filter)
+                                       bpf_prog_destroy(ppp->pass_filter);
+                               ppp->pass_filter = pass_filter;
+                               ppp_unlock(ppp);
                        }
-                       if (fprog.filter != NULL)
-                               err = bpf_prog_create(&ppp->pass_filter,
-                                                     &fprog);
-                       else
-                               err = 0;
                        kfree(code);
-                       ppp_unlock(ppp);
                }
                break;
        }
@@ -781,23 +781,23 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
                err = get_filter(argp, &code);
                if (err >= 0) {
+                       struct bpf_prog *active_filter = NULL;
                        struct sock_fprog_kern fprog = {
                                .len = err,
                                .filter = code,
                        };
 
-                       ppp_lock(ppp);
-                       if (ppp->active_filter) {
-                               bpf_prog_destroy(ppp->active_filter);
-                               ppp->active_filter = NULL;
+                       err = 0;
+                       if (fprog.filter)
+                               err = bpf_prog_create(&active_filter, &fprog);
+                       if (!err) {
+                               ppp_lock(ppp);
+                               if (ppp->active_filter)
+                                       bpf_prog_destroy(ppp->active_filter);
+                               ppp->active_filter = active_filter;
+                               ppp_unlock(ppp);
                        }
-                       if (fprog.filter != NULL)
-                               err = bpf_prog_create(&ppp->active_filter,
-                                                     &fprog);
-                       else
-                               err = 0;
                        kfree(code);
-                       ppp_unlock(ppp);
                }
                break;
        }
index 7302398f0b1fff89ffe4a4e373e936c928180645..9dd3746994a42cb73c585848876ff3e878963f24 100644 (file)
@@ -1235,12 +1235,20 @@ static ssize_t tun_put_user(struct tun_struct *tun,
        struct tun_pi pi = { 0, skb->protocol };
        ssize_t total = 0;
        int vlan_offset = 0, copied;
+       int vlan_hlen = 0;
+       int vnet_hdr_sz = 0;
+
+       if (vlan_tx_tag_present(skb))
+               vlan_hlen = VLAN_HLEN;
+
+       if (tun->flags & TUN_VNET_HDR)
+               vnet_hdr_sz = tun->vnet_hdr_sz;
 
        if (!(tun->flags & TUN_NO_PI)) {
                if ((len -= sizeof(pi)) < 0)
                        return -EINVAL;
 
-               if (len < skb->len) {
+               if (len < skb->len + vlan_hlen + vnet_hdr_sz) {
                        /* Packet will be striped */
                        pi.flags |= TUN_PKT_STRIP;
                }
@@ -1250,9 +1258,9 @@ static ssize_t tun_put_user(struct tun_struct *tun,
                total += sizeof(pi);
        }
 
-       if (tun->flags & TUN_VNET_HDR) {
+       if (vnet_hdr_sz) {
                struct virtio_net_hdr gso = { 0 }; /* no info leak */
-               if ((len -= tun->vnet_hdr_sz) < 0)
+               if ((len -= vnet_hdr_sz) < 0)
                        return -EINVAL;
 
                if (skb_is_gso(skb)) {
@@ -1284,7 +1292,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-                       gso.csum_start = skb_checksum_start_offset(skb);
+                       gso.csum_start = skb_checksum_start_offset(skb) +
+                                        vlan_hlen;
                        gso.csum_offset = skb->csum_offset;
                } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
                        gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
@@ -1293,14 +1302,13 @@ static ssize_t tun_put_user(struct tun_struct *tun,
                if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
                                               sizeof(gso))))
                        return -EFAULT;
-               total += tun->vnet_hdr_sz;
+               total += vnet_hdr_sz;
        }
 
        copied = total;
-       total += skb->len;
-       if (!vlan_tx_tag_present(skb)) {
-               len = min_t(int, skb->len, len);
-       } else {
+       len = min_t(int, skb->len + vlan_hlen, len);
+       total += skb->len + vlan_hlen;
+       if (vlan_hlen) {
                int copy, ret;
                struct {
                        __be16 h_vlan_proto;
@@ -1311,8 +1319,6 @@ static ssize_t tun_put_user(struct tun_struct *tun,
                veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
 
                vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
-               len = min_t(int, skb->len + VLAN_HLEN, len);
-               total += VLAN_HLEN;
 
                copy = min_t(int, vlan_offset, len);
                ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
index 2c05f6cdb12f3a1e2ae9bed29c6241cee7dc3a46..816d511e34d33065a80cd171bab45ec25b8af3db 100644 (file)
@@ -465,19 +465,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
                return ret;
        }
 
-       ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL);
-       if (ret < 0)
-               return ret;
-
-       msleep(150);
-
-       ret = asix_sw_reset(dev, AX_SWRESET_CLEAR);
-       if (ret < 0)
-               return ret;
-
-       msleep(150);
-
-       ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_PRTE);
+       ax88772_reset(dev);
 
        /* Read PHYID register *AFTER* the PHY was reset properly */
        phyid = asix_get_phyid(dev);
index ca309820d39e1ba7995f38d3a2f9bacbd1c1f857..fa9dc45b75a6f9f7fb04e25c61fa3eb732d10af6 100644 (file)
@@ -275,13 +275,15 @@ static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
        return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
 }
 
-/* Find VXLAN socket based on network namespace and UDP port */
-static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port)
+/* Find VXLAN socket based on network namespace, address family and UDP port */
+static struct vxlan_sock *vxlan_find_sock(struct net *net,
+                                         sa_family_t family, __be16 port)
 {
        struct vxlan_sock *vs;
 
        hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
-               if (inet_sk(vs->sock->sk)->inet_sport == port)
+               if (inet_sk(vs->sock->sk)->inet_sport == port &&
+                   inet_sk(vs->sock->sk)->sk.sk_family == family)
                        return vs;
        }
        return NULL;
@@ -300,11 +302,12 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
 }
 
 /* Look up VNI in a per net namespace table */
-static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
+static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id,
+                                       sa_family_t family, __be16 port)
 {
        struct vxlan_sock *vs;
 
-       vs = vxlan_find_sock(net, port);
+       vs = vxlan_find_sock(net, family, port);
        if (!vs)
                return NULL;
 
@@ -621,6 +624,8 @@ static int vxlan_gro_complete(struct sk_buff *skb, int nhoff)
        int vxlan_len  = sizeof(struct vxlanhdr) + sizeof(struct ethhdr);
        int err = -ENOSYS;
 
+       udp_tunnel_gro_complete(skb, nhoff);
+
        eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr));
        type = eh->h_proto;
 
@@ -1771,7 +1776,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                        struct vxlan_dev *dst_vxlan;
 
                        ip_rt_put(rt);
-                       dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
+                       dst_vxlan = vxlan_find_vni(vxlan->net, vni,
+                                                  dst->sa.sa_family, dst_port);
                        if (!dst_vxlan)
                                goto tx_error;
                        vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1825,7 +1831,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                        struct vxlan_dev *dst_vxlan;
 
                        dst_release(ndst);
-                       dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
+                       dst_vxlan = vxlan_find_vni(vxlan->net, vni,
+                                                  dst->sa.sa_family, dst_port);
                        if (!dst_vxlan)
                                goto tx_error;
                        vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1985,13 +1992,15 @@ static int vxlan_init(struct net_device *dev)
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
        struct vxlan_sock *vs;
+       bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
 
        dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!dev->tstats)
                return -ENOMEM;
 
        spin_lock(&vn->sock_lock);
-       vs = vxlan_find_sock(vxlan->net, vxlan->dst_port);
+       vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
+                            vxlan->dst_port);
        if (vs) {
                /* If we have a socket with same port already, reuse it */
                atomic_inc(&vs->refcnt);
@@ -2382,6 +2391,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
 {
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
        struct vxlan_sock *vs;
+       bool ipv6 = flags & VXLAN_F_IPV6;
 
        vs = vxlan_socket_create(net, port, rcv, data, flags);
        if (!IS_ERR(vs))
@@ -2391,7 +2401,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
                return vs;
 
        spin_lock(&vn->sock_lock);
-       vs = vxlan_find_sock(net, port);
+       vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
        if (vs) {
                if (vs->rcv == rcv)
                        atomic_inc(&vs->refcnt);
@@ -2550,7 +2560,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
            nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
                vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
 
-       if (vxlan_find_vni(net, vni, vxlan->dst_port)) {
+       if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET,
+                          vxlan->dst_port)) {
                pr_info("duplicate VNI %u\n", vni);
                return -EEXIST;
        }
index e0d9f19650b07e5df8b56342ec5e6d1bef88194f..eb03943f846326207061a2ed63e95df09af93caf 100644 (file)
@@ -284,7 +284,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 
        lockdep_assert_held(&mvm->mutex);
 
-       if (WARN_ON_ONCE(mvm->init_ucode_complete))
+       if (WARN_ON_ONCE(mvm->init_ucode_complete || mvm->calibrating))
                return 0;
 
        iwl_init_notification_wait(&mvm->notif_wait,
@@ -334,6 +334,8 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
                goto out;
        }
 
+       mvm->calibrating = true;
+
        /* Send TX valid antennas before triggering calibrations */
        ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
        if (ret)
@@ -358,11 +360,17 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
                        MVM_UCODE_CALIB_TIMEOUT);
        if (!ret)
                mvm->init_ucode_complete = true;
+
+       if (ret && iwl_mvm_is_radio_killed(mvm)) {
+               IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
+               ret = 1;
+       }
        goto out;
 
 error:
        iwl_remove_notification(&mvm->notif_wait, &calib_wait);
 out:
+       mvm->calibrating = false;
        if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
                /* we want to debug INIT and we have no NVM - fake */
                mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
index 585fe5b7100fb8750bed29eda65d91121422497c..b62405865b25cd185c560731d54ab36304c54812 100644 (file)
@@ -788,6 +788,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
 
        mvm->scan_status = IWL_MVM_SCAN_NONE;
        mvm->ps_disabled = false;
+       mvm->calibrating = false;
 
        /* just in case one was running */
        ieee80211_remain_on_channel_expired(mvm->hw);
index b153ced7015bfef8984a9ecb78f844d4533ed7d9..845429c88cf403fdaffee73951fa99251b619137 100644 (file)
@@ -548,6 +548,7 @@ struct iwl_mvm {
        enum iwl_ucode_type cur_ucode;
        bool ucode_loaded;
        bool init_ucode_complete;
+       bool calibrating;
        u32 error_event_table;
        u32 log_event_table;
        u32 umac_error_event_table;
index 48cb25a93591a44678fd051e390dc15c02078744..5b719ee8e789075e6e7517512ca311cb6f9b6121 100644 (file)
@@ -424,6 +424,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        }
        mvm->sf_state = SF_UNINIT;
        mvm->low_latency_agg_frame_limit = 6;
+       mvm->cur_ucode = IWL_UCODE_INIT;
 
        mutex_init(&mvm->mutex);
        mutex_init(&mvm->d0i3_suspend_mutex);
@@ -752,6 +753,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
 static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
 {
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+       bool calibrating = ACCESS_ONCE(mvm->calibrating);
 
        if (state)
                set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
@@ -760,7 +762,15 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
 
        wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
 
-       return state && mvm->cur_ucode != IWL_UCODE_INIT;
+       /* iwl_run_init_mvm_ucode is waiting for results, abort it */
+       if (calibrating)
+               iwl_abort_notification_waits(&mvm->notif_wait);
+
+       /*
+        * Stop the device if we run OPERATIONAL firmware or if we are in the
+        * middle of the calibrations.
+        */
+       return state && (mvm->cur_ucode != IWL_UCODE_INIT || calibrating);
 }
 
 static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
index 3781b029e54a328f6304bfb6cf61d52c053cbce7..160c3ebc48d0b2c938c04718f5709407c7a2bc16 100644 (file)
@@ -915,7 +915,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
         * restart. So don't process again if the device is
         * already dead.
         */
-       if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+       if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+               IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n");
                iwl_pcie_tx_stop(trans);
                iwl_pcie_rx_stop(trans);
 
@@ -945,7 +946,6 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
        /* clear all status bits */
        clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
        clear_bit(STATUS_INT_ENABLED, &trans->status);
-       clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
        clear_bit(STATUS_TPOWER_PMI, &trans->status);
        clear_bit(STATUS_RFKILL, &trans->status);
 
index babbdc1ce741c62024bc6220fdcf2a7bd3487790..c9ad4cf1adfb4b1d7a30068c2546ac7354345139 100644 (file)
@@ -1987,7 +1987,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
        if (err != 0) {
                printk(KERN_DEBUG "mac80211_hwsim: device_bind_driver failed (%d)\n",
                       err);
-               goto failed_hw;
+               goto failed_bind;
        }
 
        skb_queue_head_init(&data->pending);
@@ -2183,6 +2183,8 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
        return idx;
 
 failed_hw:
+       device_release_driver(data->dev);
+failed_bind:
        device_unregister(data->dev);
 failed_drvdata:
        ieee80211_free_hw(hw);
index 4dcfb7116a0487656bf489a45c6345c18ea1e219..a2eabe6ff9ada9b7bff3991a0116c8137a78b92d 100644 (file)
@@ -202,6 +202,7 @@ config TC1100_WMI
 config HP_ACCEL
        tristate "HP laptop accelerometer"
        depends on INPUT && ACPI
+       depends on SERIO_I8042
        select SENSORS_LIS3LV02D
        select NEW_LEDS
        select LEDS_CLASS
index 13e14ec1d3d7118c4e4ab63ac98a2ef0c1258118..6bec745b6b92dc6e3f849a12da003b639d1900ed 100644 (file)
@@ -37,6 +37,8 @@
 #include <linux/leds.h>
 #include <linux/atomic.h>
 #include <linux/acpi.h>
+#include <linux/i8042.h>
+#include <linux/serio.h>
 #include "../../misc/lis3lv02d/lis3lv02d.h"
 
 #define DRIVER_NAME     "hp_accel"
@@ -73,6 +75,13 @@ static inline void delayed_sysfs_set(struct led_classdev *led_cdev,
 
 /* HP-specific accelerometer driver ------------------------------------ */
 
+/* e0 25, e0 26, e0 27, e0 28 are scan codes that the accelerometer with acpi id
+ * HPQ6000 sends through the keyboard bus */
+#define ACCEL_1 0x25
+#define ACCEL_2 0x26
+#define ACCEL_3 0x27
+#define ACCEL_4 0x28
+
 /* For automatic insertion of the module */
 static const struct acpi_device_id lis3lv02d_device_ids[] = {
        {"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */
@@ -294,6 +303,35 @@ static void lis3lv02d_enum_resources(struct acpi_device *device)
                printk(KERN_DEBUG DRIVER_NAME ": Error getting resources\n");
 }
 
+static bool hp_accel_i8042_filter(unsigned char data, unsigned char str,
+                                 struct serio *port)
+{
+       static bool extended;
+
+       if (str & I8042_STR_AUXDATA)
+               return false;
+
+       if (data == 0xe0) {
+               extended = true;
+               return true;
+       } else if (unlikely(extended)) {
+               extended = false;
+
+               switch (data) {
+               case ACCEL_1:
+               case ACCEL_2:
+               case ACCEL_3:
+               case ACCEL_4:
+                       return true;
+               default:
+                       serio_interrupt(port, 0xe0, 0);
+                       return false;
+               }
+       }
+
+       return false;
+}
+
 static int lis3lv02d_add(struct acpi_device *device)
 {
        int ret;
@@ -326,6 +364,11 @@ static int lis3lv02d_add(struct acpi_device *device)
        if (ret)
                return ret;
 
+       /* filter to remove HPQ6000 accelerometer data
+        * from keyboard bus stream */
+       if (strstr(dev_name(&device->dev), "HPQ6000"))
+               i8042_install_filter(hp_accel_i8042_filter);
+
        INIT_WORK(&hpled_led.work, delayed_set_status_worker);
        ret = led_classdev_register(NULL, &hpled_led.led_classdev);
        if (ret) {
@@ -343,6 +386,7 @@ static int lis3lv02d_remove(struct acpi_device *device)
        if (!device)
                return -EINVAL;
 
+       i8042_remove_filter(hp_accel_i8042_filter);
        lis3lv02d_joystick_disable(&lis3_dev);
        lis3lv02d_poweroff(&lis3_dev);
 
index 6cbe6ef3c889d14841e2c73aa848fa9f563b68ec..bda52f18e9670aefe1a7757209fdf038c866ad5a 100644 (file)
@@ -888,7 +888,6 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
        struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
        int i;
        struct virtqueue *vq;
-       struct virtio_driver *drv;
 
        if (!vcdev)
                return;
index 461bf3d033a061833409c1fcc5d6538066d5016b..5a1f1070b702282bb1f4ded1535dbfd3abdb6963 100644 (file)
@@ -459,6 +459,10 @@ static int imx_thermal_probe(struct platform_device *pdev)
        int measure_freq;
        int ret;
 
+       if (!cpufreq_get_current_driver()) {
+               dev_dbg(&pdev->dev, "no cpufreq driver!");
+               return -EPROBE_DEFER;
+       }
        data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
@@ -521,6 +525,30 @@ static int imx_thermal_probe(struct platform_device *pdev)
                return ret;
        }
 
+       data->thermal_clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(data->thermal_clk)) {
+               ret = PTR_ERR(data->thermal_clk);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(&pdev->dev,
+                               "failed to get thermal clk: %d\n", ret);
+               cpufreq_cooling_unregister(data->cdev);
+               return ret;
+       }
+
+       /*
+        * Thermal sensor needs clk on to get correct value, normally
+        * we should enable its clk before taking measurement and disable
+        * clk after measurement is done, but if alarm function is enabled,
+        * hardware will auto measure the temperature periodically, so we
+        * need to keep the clk always on for alarm function.
+        */
+       ret = clk_prepare_enable(data->thermal_clk);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
+               cpufreq_cooling_unregister(data->cdev);
+               return ret;
+       }
+
        data->tz = thermal_zone_device_register("imx_thermal_zone",
                                                IMX_TRIP_NUM,
                                                BIT(IMX_TRIP_PASSIVE), data,
@@ -531,26 +559,11 @@ static int imx_thermal_probe(struct platform_device *pdev)
                ret = PTR_ERR(data->tz);
                dev_err(&pdev->dev,
                        "failed to register thermal zone device %d\n", ret);
+               clk_disable_unprepare(data->thermal_clk);
                cpufreq_cooling_unregister(data->cdev);
                return ret;
        }
 
-       data->thermal_clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(data->thermal_clk)) {
-               dev_warn(&pdev->dev, "failed to get thermal clk!\n");
-       } else {
-               /*
-                * Thermal sensor needs clk on to get correct value, normally
-                * we should enable its clk before taking measurement and disable
-                * clk after measurement is done, but if alarm function is enabled,
-                * hardware will auto measure the temperature periodically, so we
-                * need to keep the clk always on for alarm function.
-                */
-               ret = clk_prepare_enable(data->thermal_clk);
-               if (ret)
-                       dev_warn(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
-       }
-
        /* Enable measurements at ~ 10 Hz */
        regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ);
        measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */
index d20dba986f0f614c19f4b2c7061f129ece26ab27..6e9fb62eb8170213149de3800cca3fa858b3a3e1 100644 (file)
@@ -92,7 +92,13 @@ static int sys_get_trip_hyst(struct thermal_zone_device *tzone,
        if (ACPI_FAILURE(status))
                return -EIO;
 
-       *temp = DECI_KELVIN_TO_MILLI_CELSIUS(hyst, KELVIN_OFFSET);
+       /*
+        * Thermal hysteresis represents a temperature difference.
+        * Kelvin and Celsius have same degree size. So the
+        * conversion here between tenths of degree Kelvin unit
+        * and Milli-Celsius unit is just to multiply 100.
+        */
+       *temp = hyst * 100;
 
        return 0;
 }
index 2683d2897e90bc6de18a5a367aa83342f401b00f..1724f6cdaef8f85603da7cbf99c225b0053dc055 100644 (file)
@@ -264,7 +264,6 @@ struct exynos_tmu_init_data const exynos5250_default_tmu_data = {
 static const struct exynos_tmu_registers exynos5260_tmu_registers = {
        .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
        .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
-       .tmu_ctrl = EXYNOS_TMU_REG_CONTROL1,
        .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
        .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
        .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
index 65e2ea6a9579429b86b10260535af1366b33154d..63de598c9c2c3f9b8804110f086ed384991f547a 100644 (file)
@@ -75,7 +75,6 @@
 #define EXYNOS_MAX_TRIGGER_PER_REG     4
 
 /* Exynos5260 specific */
-#define EXYNOS_TMU_REG_CONTROL1                        0x24
 #define EXYNOS5260_TMU_REG_INTEN               0xC0
 #define EXYNOS5260_TMU_REG_INTSTAT             0xC4
 #define EXYNOS5260_TMU_REG_INTCLEAR            0xC8
index 659f2ea9e6f74741ecbe2a7e4c322f079352fb08..cefca661464b91a4edae048a8567e900d8560df6 100644 (file)
@@ -2638,7 +2638,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
 
        for (i = 0; i < CEPH_CAP_BITS; i++)
                if ((dirty & (1 << i)) &&
-                   flush_tid == ci->i_cap_flush_tid[i])
+                   (u16)flush_tid == ci->i_cap_flush_tid[i])
                        cleaned |= 1 << i;
 
        dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
index 9d3e9c50066aaf5856350cf3bc85576a79bab900..89326acd45615e50cc60c09cdd3531137b813138 100644 (file)
@@ -229,8 +229,16 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
                                              &fsnotify_mark_srcu);
        }
 
+       /*
+        * We need to merge inode & vfsmount mark lists so that inode mark
+        * ignore masks are properly reflected for mount mark notifications.
+        * That's why this traversal is so complicated...
+        */
        while (inode_node || vfsmount_node) {
-               inode_group = vfsmount_group = NULL;
+               inode_group = NULL;
+               inode_mark = NULL;
+               vfsmount_group = NULL;
+               vfsmount_mark = NULL;
 
                if (inode_node) {
                        inode_mark = hlist_entry(srcu_dereference(inode_node, &fsnotify_mark_srcu),
@@ -244,21 +252,19 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
                        vfsmount_group = vfsmount_mark->group;
                }
 
-               if (inode_group > vfsmount_group) {
-                       /* handle inode */
-                       ret = send_to_group(to_tell, inode_mark, NULL, mask,
-                                           data, data_is, cookie, file_name);
-                       /* we didn't use the vfsmount_mark */
-                       vfsmount_group = NULL;
-               } else if (vfsmount_group > inode_group) {
-                       ret = send_to_group(to_tell, NULL, vfsmount_mark, mask,
-                                           data, data_is, cookie, file_name);
-                       inode_group = NULL;
-               } else {
-                       ret = send_to_group(to_tell, inode_mark, vfsmount_mark,
-                                           mask, data, data_is, cookie,
-                                           file_name);
+               if (inode_group && vfsmount_group) {
+                       int cmp = fsnotify_compare_groups(inode_group,
+                                                         vfsmount_group);
+                       if (cmp > 0) {
+                               inode_group = NULL;
+                               inode_mark = NULL;
+                       } else if (cmp < 0) {
+                               vfsmount_group = NULL;
+                               vfsmount_mark = NULL;
+                       }
                }
+               ret = send_to_group(to_tell, inode_mark, vfsmount_mark, mask,
+                                   data, data_is, cookie, file_name);
 
                if (ret && (mask & ALL_FSNOTIFY_PERM_EVENTS))
                        goto out;
index 9c0898c4cfe1ce771a8a0832adda51c80225ce59..3b68b0ae0a97cb6beb6b642f3098de5c8aaec4d0 100644 (file)
@@ -12,6 +12,10 @@ extern void fsnotify_flush_notify(struct fsnotify_group *group);
 /* protects reads of inode and vfsmount marks list */
 extern struct srcu_struct fsnotify_mark_srcu;
 
+/* compare two groups for sorting of marks lists */
+extern int fsnotify_compare_groups(struct fsnotify_group *a,
+                                  struct fsnotify_group *b);
+
 extern void fsnotify_set_inode_mark_mask_locked(struct fsnotify_mark *fsn_mark,
                                                __u32 mask);
 /* add a mark to an inode */
index e8497144b32342437377748f26a616168455339a..dfbf5447eea4cea8fdf664ff5b0232f8a61e68b0 100644 (file)
@@ -194,6 +194,7 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
 {
        struct fsnotify_mark *lmark, *last = NULL;
        int ret = 0;
+       int cmp;
 
        mark->flags |= FSNOTIFY_MARK_FLAG_INODE;
 
@@ -219,11 +220,8 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
                        goto out;
                }
 
-               if (mark->group->priority < lmark->group->priority)
-                       continue;
-
-               if ((mark->group->priority == lmark->group->priority) &&
-                   (mark->group < lmark->group))
+               cmp = fsnotify_compare_groups(lmark->group, mark->group);
+               if (cmp < 0)
                        continue;
 
                hlist_add_before_rcu(&mark->i.i_list, &lmark->i.i_list);
index d90deaa08e78f6e82cde921f5914000bc9ca4a97..34c38fabf514f1a892e10e2e24ff4a3d252ec000 100644 (file)
@@ -209,6 +209,42 @@ void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mas
        mark->ignored_mask = mask;
 }
 
+/*
+ * Sorting function for lists of fsnotify marks.
+ *
+ * Fanotify supports different notification classes (reflected as priority of
+ * notification group). Events shall be passed to notification groups in
+ * decreasing priority order. To achieve this marks in notification lists for
+ * inodes and vfsmounts are sorted so that priorities of corresponding groups
+ * are descending.
+ *
+ * Furthermore correct handling of the ignore mask requires processing inode
+ * and vfsmount marks of each group together. Using the group address as
+ * further sort criterion provides a unique sorting order and thus we can
+ * merge inode and vfsmount lists of marks in linear time and find groups
+ * present in both lists.
+ *
+ * A return value of 1 signifies that b has priority over a.
+ * A return value of 0 signifies that the two marks have to be handled together.
+ * A return value of -1 signifies that a has priority over b.
+ */
+int fsnotify_compare_groups(struct fsnotify_group *a, struct fsnotify_group *b)
+{
+       if (a == b)
+               return 0;
+       if (!a)
+               return 1;
+       if (!b)
+               return -1;
+       if (a->priority < b->priority)
+               return 1;
+       if (a->priority > b->priority)
+               return -1;
+       if (a < b)
+               return 1;
+       return -1;
+}
+
 /*
  * Attach an initialized mark to a given group and fs object.
  * These marks may be used for the fsnotify backend to determine which
index ac851e8376b1931d88adcf4ff5eaa8bd2445a635..faefa72a11ebaacff32569535e69a9005a0d4f0f 100644 (file)
@@ -153,6 +153,7 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
        struct mount *m = real_mount(mnt);
        struct fsnotify_mark *lmark, *last = NULL;
        int ret = 0;
+       int cmp;
 
        mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT;
 
@@ -178,11 +179,8 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
                        goto out;
                }
 
-               if (mark->group->priority < lmark->group->priority)
-                       continue;
-
-               if ((mark->group->priority == lmark->group->priority) &&
-                   (mark->group < lmark->group))
+               cmp = fsnotify_compare_groups(lmark->group, mark->group);
+               if (cmp < 0)
                        continue;
 
                hlist_add_before_rcu(&mark->m.m_list, &lmark->m.m_list);
index 4e2bd4c95b66ff245fa78d64a455267ac646fcff..0995c2de8162c2f6368647503ddda9017a68f6c6 100644 (file)
@@ -46,6 +46,7 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat,
 extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
 
 extern unsigned long free_all_bootmem(void);
+extern void reset_node_managed_pages(pg_data_t *pgdat);
 extern void reset_all_zones_managed_pages(void);
 
 extern void free_bootmem_node(pg_data_t *pgdat,
index fc17d56581b2954c877eebcc637adca43a45321f..582e67f340543490ad917dbaf5fd01be25fafa14 100644 (file)
@@ -330,6 +330,13 @@ enum max77693_irq_source {
        MAX77693_IRQ_GROUP_NR,
 };
 
+#define SRC_IRQ_CHARGER                        BIT(0)
+#define SRC_IRQ_TOP                    BIT(1)
+#define SRC_IRQ_FLASH                  BIT(2)
+#define SRC_IRQ_MUIC                   BIT(3)
+#define SRC_IRQ_ALL                    (SRC_IRQ_CHARGER | SRC_IRQ_TOP \
+                                               | SRC_IRQ_FLASH | SRC_IRQ_MUIC)
+
 #define LED_IRQ_FLED2_OPEN             BIT(0)
 #define LED_IRQ_FLED2_SHORT            BIT(1)
 #define LED_IRQ_FLED1_OPEN             BIT(2)
index 48bf12ef6620ccc863c27afc615a2e2e460a6c99..ffe66e381c04237fb54a0447741f39a40d7c5d71 100644 (file)
@@ -431,6 +431,15 @@ struct zone {
         */
        int                     nr_migrate_reserve_block;
 
+#ifdef CONFIG_MEMORY_ISOLATION
+       /*
+        * Number of isolated pageblock. It is used to solve incorrect
+        * freepage counting problem due to racy retrieving migratetype
+        * of pageblock. Protected by zone->lock.
+        */
+       unsigned long           nr_isolate_pageblock;
+#endif
+
 #ifdef CONFIG_MEMORY_HOTPLUG
        /* see spanned/present_pages for more description */
        seqlock_t               span_seqlock;
index 3fff8e774067904bb73b5817ba471099a80555da..2dc1e1697b451ce678781a55776a15c8934be7a5 100644 (file)
@@ -2,6 +2,10 @@
 #define __LINUX_PAGEISOLATION_H
 
 #ifdef CONFIG_MEMORY_ISOLATION
+static inline bool has_isolate_pageblock(struct zone *zone)
+{
+       return zone->nr_isolate_pageblock;
+}
 static inline bool is_migrate_isolate_page(struct page *page)
 {
        return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
@@ -11,6 +15,10 @@ static inline bool is_migrate_isolate(int migratetype)
        return migratetype == MIGRATE_ISOLATE;
 }
 #else
+static inline bool has_isolate_pageblock(struct zone *zone)
+{
+       return false;
+}
 static inline bool is_migrate_isolate_page(struct page *page)
 {
        return false;
index 73e938b7e9374c68ac00fd99c65247eac9241fd4..2e0e06daf8c0692c16561eb35928b1502647f1ff 100644 (file)
@@ -72,8 +72,10 @@ struct generic_pm_domain {
        bool max_off_time_changed;
        bool cached_power_down_ok;
        struct gpd_cpuidle_data *cpuidle_data;
-       void (*attach_dev)(struct device *dev);
-       void (*detach_dev)(struct device *dev);
+       int (*attach_dev)(struct generic_pm_domain *domain,
+                         struct device *dev);
+       void (*detach_dev)(struct generic_pm_domain *domain,
+                          struct device *dev);
 };
 
 static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
@@ -104,7 +106,7 @@ struct generic_pm_domain_data {
        struct notifier_block nb;
        struct mutex lock;
        unsigned int refcount;
-       bool need_restore;
+       int need_restore;
 };
 
 #ifdef CONFIG_PM_GENERIC_DOMAINS
index 49a4d6f59108f957d4534190c161344981c48e46..e2c13cd863bdc5b41a65731a4e55018aee3762f4 100644 (file)
@@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
        __ring_buffer_alloc((size), (flags), &__key);   \
 })
 
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu);
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full);
 int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
                          struct file *filp, poll_table *poll_table);
 
index ec538fc287a66000f7ce357f4bfe8012223acc26..bb9b83640070f4a366a8ca67f56495f07aaa7830 100644 (file)
@@ -256,7 +256,7 @@ struct ucred {
 #define MSG_EOF         MSG_FIN
 
 #define MSG_FASTOPEN   0x20000000      /* Send data in TCP SYN */
-#define MSG_CMSG_CLOEXEC 0x40000000    /* Set close_on_exit for file
+#define MSG_CMSG_CLOEXEC 0x40000000    /* Set close_on_exec for file
                                           descriptor received through
                                           SCM_RIGHTS */
 #if defined(CONFIG_COMPAT)
index d9fa68f26c41c34c33db5f743a4142faf7886792..2a25dec3021166d5aba52ad155e8ca01e0b1570e 100644 (file)
@@ -34,7 +34,6 @@
  * @list: used to maintain a list of currently available transports
  * @name: the human-readable name of the transport
  * @maxsize: transport provided maximum packet size
- * @pref: Preferences of this transport
  * @def: set if this transport should be considered the default
  * @create: member function to create a new connection on this transport
  * @close: member function to discard a connection on this transport
index a47790bcaa3831b1c2692b3cf27c4306ff39e631..2a50a70ef5870c76e0694ca460182671df46973e 100644 (file)
@@ -100,6 +100,15 @@ static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
        return iptunnel_handle_offloads(skb, udp_csum, type);
 }
 
+static inline void udp_tunnel_gro_complete(struct sk_buff *skb, int nhoff)
+{
+       struct udphdr *uh;
+
+       uh = (struct udphdr *)(skb->data + nhoff - sizeof(struct udphdr));
+       skb_shinfo(skb)->gso_type |= uh->check ?
+                               SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+}
+
 static inline void udp_tunnel_encap_enable(struct socket *sock)
 {
 #if IS_ENABLED(CONFIG_IPV6)
index b70237e8bc37b49bb9905669fb5d109ed17300fb..4c94f31a8c99b68e3ada83ed75b3726de3043a08 100644 (file)
@@ -125,6 +125,7 @@ header-y += filter.h
 header-y += firewire-cdev.h
 header-y += firewire-constants.h
 header-y += flat.h
+header-y += fou.h
 header-y += fs.h
 header-y += fsl_hypervisor.h
 header-y += fuse.h
@@ -141,6 +142,7 @@ header-y += hid.h
 header-y += hiddev.h
 header-y += hidraw.h
 header-y += hpet.h
+header-y += hsr_netlink.h
 header-y += hyperv.h
 header-y += hysdn_if.h
 header-y += i2c-dev.h
@@ -251,6 +253,7 @@ header-y += mii.h
 header-y += minix_fs.h
 header-y += mman.h
 header-y += mmtimer.h
+header-y += mpls.h
 header-y += mqueue.h
 header-y += mroute.h
 header-y += mroute6.h
@@ -424,6 +427,7 @@ header-y += virtio_net.h
 header-y += virtio_pci.h
 header-y += virtio_ring.h
 header-y += virtio_rng.h
 header-y += vm_sockets.h
 header-y += vt.h
 header-y += wait.h
 header-y += wanrouter.h
index 39f621a9fe826cb9a0eab487cd36a51aed5440ab..da17e456908d2d16d1500499858296a042b3186c 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <linux/types.h>
 #include <linux/if_ether.h>
+#include <linux/in6.h>
 
 #define SYSFS_BRIDGE_ATTR      "bridge"
 #define SYSFS_BRIDGE_FDB       "brforward"
index 800a0daede7e4d8b8940288bd30dfb43a6e922a8..321d0ceb26d3782ed6871feef4c52090e9872b23 100644 (file)
@@ -544,7 +544,7 @@ asmlinkage __visible void __init start_kernel(void)
                                  static_command_line, __start___param,
                                  __stop___param - __start___param,
                                  -1, -1, &unknown_bootoption);
-       if (after_dashes)
+       if (!IS_ERR_OR_NULL(after_dashes))
                parse_args("Setting init args", after_dashes, NULL, 0, -1, -1,
                           set_init_arg);
 
index 80983df92cd441f981090394252126fbde22cd64..cebb11db4d342642efccdc6fdebdea638353324c 100644 (file)
@@ -739,7 +739,7 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature
 
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_FEATURE_CHANGE);
        audit_log_task_info(ab, current);
-       audit_log_format(ab, "feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d",
+       audit_log_format(ab, " feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d",
                         audit_feature_names[which], !!old_feature, !!new_feature,
                         !!old_lock, !!new_lock, res);
        audit_log_end(ab);
index e242e3a9864ad6032e3d32704aa9b95550036b8a..80f29e0155705159fc83a80c1138e45b590661e2 100644 (file)
@@ -154,6 +154,7 @@ static struct audit_chunk *alloc_chunk(int count)
                chunk->owners[i].index = i;
        }
        fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
+       chunk->mark.mask = FS_IN_IGNORED;
        return chunk;
 }
 
index d09dc5c32c6740e41a5987cca0252ad86b43bdbd..cf80672b79246dd439f64cea16d3e631e7d35f2f 100644 (file)
@@ -244,6 +244,7 @@ static const struct tnt tnts[] = {
  *  'I' - Working around severe firmware bug.
  *  'O' - Out-of-tree module has been loaded.
  *  'E' - Unsigned module has been loaded.
+ *  'L' - A soft lockup has previously occurred.
  *
  *     The string is overwritten by the next call to print_tainted().
  */
index 4ca9a33ff62020e63d15219ce9f097611ebf6507..c347e3ce3a55df9efe054caaffd08bed4bb9e886 100644 (file)
@@ -146,7 +146,7 @@ static int platform_suspend_prepare(suspend_state_t state)
 
 static int platform_suspend_prepare_late(suspend_state_t state)
 {
-       return state == PM_SUSPEND_FREEZE && freeze_ops->prepare ?
+       return state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->prepare ?
                freeze_ops->prepare() : 0;
 }
 
@@ -164,7 +164,7 @@ static void platform_resume_noirq(suspend_state_t state)
 
 static void platform_resume_early(suspend_state_t state)
 {
-       if (state == PM_SUSPEND_FREEZE && freeze_ops->restore)
+       if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->restore)
                freeze_ops->restore();
 }
 
index 2d75c94ae87d871bbf42db7b1ee949463bb7f65f..a56e07c8d15b8b730eb54f2020a018ff440eec6c 100644 (file)
@@ -538,16 +538,18 @@ static void rb_wake_up_waiters(struct irq_work *work)
  * ring_buffer_wait - wait for input to the ring buffer
  * @buffer: buffer to wait on
  * @cpu: the cpu buffer to wait on
+ * @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS
  *
  * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
  * as data is added to any of the @buffer's cpu buffers. Otherwise
  * it will wait for data to be added to a specific cpu buffer.
  */
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
 {
-       struct ring_buffer_per_cpu *cpu_buffer;
+       struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
        DEFINE_WAIT(wait);
        struct rb_irq_work *work;
+       int ret = 0;
 
        /*
         * Depending on what the caller is waiting for, either any
@@ -564,36 +566,61 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
        }
 
 
-       prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
+       while (true) {
+               prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
 
-       /*
-        * The events can happen in critical sections where
-        * checking a work queue can cause deadlocks.
-        * After adding a task to the queue, this flag is set
-        * only to notify events to try to wake up the queue
-        * using irq_work.
-        *
-        * We don't clear it even if the buffer is no longer
-        * empty. The flag only causes the next event to run
-        * irq_work to do the work queue wake up. The worse
-        * that can happen if we race with !trace_empty() is that
-        * an event will cause an irq_work to try to wake up
-        * an empty queue.
-        *
-        * There's no reason to protect this flag either, as
-        * the work queue and irq_work logic will do the necessary
-        * synchronization for the wake ups. The only thing
-        * that is necessary is that the wake up happens after
-        * a task has been queued. It's OK for spurious wake ups.
-        */
-       work->waiters_pending = true;
+               /*
+                * The events can happen in critical sections where
+                * checking a work queue can cause deadlocks.
+                * After adding a task to the queue, this flag is set
+                * only to notify events to try to wake up the queue
+                * using irq_work.
+                *
+                * We don't clear it even if the buffer is no longer
+                * empty. The flag only causes the next event to run
+                * irq_work to do the work queue wake up. The worse
+                * that can happen if we race with !trace_empty() is that
+                * an event will cause an irq_work to try to wake up
+                * an empty queue.
+                *
+                * There's no reason to protect this flag either, as
+                * the work queue and irq_work logic will do the necessary
+                * synchronization for the wake ups. The only thing
+                * that is necessary is that the wake up happens after
+                * a task has been queued. It's OK for spurious wake ups.
+                */
+               work->waiters_pending = true;
+
+               if (signal_pending(current)) {
+                       ret = -EINTR;
+                       break;
+               }
+
+               if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
+                       break;
+
+               if (cpu != RING_BUFFER_ALL_CPUS &&
+                   !ring_buffer_empty_cpu(buffer, cpu)) {
+                       unsigned long flags;
+                       bool pagebusy;
+
+                       if (!full)
+                               break;
+
+                       raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+                       pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+                       raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+                       if (!pagebusy)
+                               break;
+               }
 
-       if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) ||
-           (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu)))
                schedule();
+       }
 
        finish_wait(&work->waiters, &wait);
-       return 0;
+
+       return ret;
 }
 
 /**
index 8a528392b1f465da19d297a84699c4931102cf3d..92f4a6cee1727360ff9d739a126f168a6cacd980 100644 (file)
@@ -1076,13 +1076,14 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
-static int wait_on_pipe(struct trace_iterator *iter)
+static int wait_on_pipe(struct trace_iterator *iter, bool full)
 {
        /* Iterators are static, they should be filled or empty */
        if (trace_buffer_iter(iter, iter->cpu_file))
                return 0;
 
-       return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
+       return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
+                               full);
 }
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
@@ -4434,15 +4435,12 @@ static int tracing_wait_pipe(struct file *filp)
 
                mutex_unlock(&iter->mutex);
 
-               ret = wait_on_pipe(iter);
+               ret = wait_on_pipe(iter, false);
 
                mutex_lock(&iter->mutex);
 
                if (ret)
                        return ret;
-
-               if (signal_pending(current))
-                       return -EINTR;
        }
 
        return 1;
@@ -5372,16 +5370,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
                                goto out_unlock;
                        }
                        mutex_unlock(&trace_types_lock);
-                       ret = wait_on_pipe(iter);
+                       ret = wait_on_pipe(iter, false);
                        mutex_lock(&trace_types_lock);
                        if (ret) {
                                size = ret;
                                goto out_unlock;
                        }
-                       if (signal_pending(current)) {
-                               size = -EINTR;
-                               goto out_unlock;
-                       }
                        goto again;
                }
                size = 0;
@@ -5500,7 +5494,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        };
        struct buffer_ref *ref;
        int entries, size, i;
-       ssize_t ret;
+       ssize_t ret = 0;
 
        mutex_lock(&trace_types_lock);
 
@@ -5538,13 +5532,16 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                int r;
 
                ref = kzalloc(sizeof(*ref), GFP_KERNEL);
-               if (!ref)
+               if (!ref) {
+                       ret = -ENOMEM;
                        break;
+               }
 
                ref->ref = 1;
                ref->buffer = iter->trace_buffer->buffer;
                ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
                if (!ref->page) {
+                       ret = -ENOMEM;
                        kfree(ref);
                        break;
                }
@@ -5582,19 +5579,19 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 
        /* did we read anything? */
        if (!spd.nr_pages) {
+               if (ret)
+                       goto out;
+
                if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
                        ret = -EAGAIN;
                        goto out;
                }
                mutex_unlock(&trace_types_lock);
-               ret = wait_on_pipe(iter);
+               ret = wait_on_pipe(iter, true);
                mutex_lock(&trace_types_lock);
                if (ret)
                        goto out;
-               if (signal_pending(current)) {
-                       ret = -EINTR;
-                       goto out;
-               }
+
                goto again;
        }
 
index 081be3ba9ea8285b05374e6bb98299af541f567c..624a0b7c05ef15934cee4a2574852f74620878a9 100644 (file)
@@ -230,7 +230,7 @@ int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
        ht->shift++;
 
        /* For each new bucket, search the corresponding old bucket
-        * for the rst entry that hashes to the new bucket, and
+        * for the first entry that hashes to the new bucket, and
         * link the new bucket to that entry. Since all the entries
         * which will end up in the new bucket appear in the same
         * old bucket, this constructs an entirely valid new hash
@@ -248,8 +248,8 @@ int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
        }
 
        /* Publish the new table pointer. Lookups may now traverse
-        * the new table, but they will not benet from any
-        * additional efciency until later steps unzip the buckets.
+        * the new table, but they will not benefit from any
+        * additional efficiency until later steps unzip the buckets.
         */
        rcu_assign_pointer(ht->tbl, new_tbl);
 
@@ -306,14 +306,14 @@ int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
 
        ht->shift--;
 
-       /* Link each bucket in the new table to the rst bucket
+       /* Link each bucket in the new table to the first bucket
         * in the old table that contains entries which will hash
         * to the new bucket.
         */
        for (i = 0; i < ntbl->size; i++) {
                ntbl->buckets[i] = tbl->buckets[i];
 
-               /* Link each bucket in the new table to the rst bucket
+               /* Link each bucket in the new table to the first bucket
                 * in the old table that contains entries which will hash
                 * to the new bucket.
                 */
index 8a000cebb0d7428d5ec48dcfa979086c57e85109..477be696511d669230b47c73d52a8b3c1836c457 100644 (file)
@@ -243,13 +243,10 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 
 static int reset_managed_pages_done __initdata;
 
-static inline void __init reset_node_managed_pages(pg_data_t *pgdat)
+void reset_node_managed_pages(pg_data_t *pgdat)
 {
        struct zone *z;
 
-       if (reset_managed_pages_done)
-               return;
-
        for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
                z->managed_pages = 0;
 }
@@ -258,8 +255,12 @@ void __init reset_all_zones_managed_pages(void)
 {
        struct pglist_data *pgdat;
 
+       if (reset_managed_pages_done)
+               return;
+
        for_each_online_pgdat(pgdat)
                reset_node_managed_pages(pgdat);
+
        reset_managed_pages_done = 1;
 }
 
index ec74cf0123efd3944894cc0b159b385d6b837f25..f9792ba3537ccc830594e7954715ca66eb2e9654 100644 (file)
@@ -479,6 +479,16 @@ isolate_freepages_range(struct compact_control *cc,
 
                block_end_pfn = min(block_end_pfn, end_pfn);
 
+               /*
+                * pfn could pass the block_end_pfn if isolated freepage
+                * is more than pageblock order. In this case, we adjust
+                * scanning range to right one.
+                */
+               if (pfn >= block_end_pfn) {
+                       block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+                       block_end_pfn = min(block_end_pfn, end_pfn);
+               }
+
                if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
                        break;
 
@@ -1029,8 +1039,12 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
        }
 
        acct_isolated(zone, cc);
-       /* Record where migration scanner will be restarted */
-       cc->migrate_pfn = low_pfn;
+       /*
+        * Record where migration scanner will be restarted. If we end up in
+        * the same pageblock as the free scanner, make the scanners fully
+        * meet so that compact_finished() terminates compaction.
+        */
+       cc->migrate_pfn = (end_pfn <= cc->free_pfn) ? low_pfn : cc->free_pfn;
 
        return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
 }
index 829304090b90e8ff57ee3eaf5281987deccb7e55..a4f90ba7068ef0af12ccdff8b3dc7408f772b447 100644 (file)
@@ -108,6 +108,31 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
 /*
  * in mm/page_alloc.c
  */
+
+/*
+ * Locate the struct page for both the matching buddy in our
+ * pair (buddy1) and the combined O(n+1) page they form (page).
+ *
+ * 1) Any buddy B1 will have an order O twin B2 which satisfies
+ * the following equation:
+ *     B2 = B1 ^ (1 << O)
+ * For example, if the starting buddy (buddy2) is #8 its order
+ * 1 buddy is #10:
+ *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
+ *
+ * 2) Any buddy B will have an order O+1 parent P which
+ * satisfies the following equation:
+ *     P = B & ~(1 << O)
+ *
+ * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
+ */
+static inline unsigned long
+__find_buddy_index(unsigned long page_idx, unsigned int order)
+{
+       return page_idx ^ (1 << order);
+}
+
+extern int __isolate_free_page(struct page *page, unsigned int order);
 extern void __free_pages_bootmem(struct page *page, unsigned int order);
 extern void prep_compound_page(struct page *page, unsigned long order);
 #ifdef CONFIG_MEMORY_FAILURE
index eafcf60f6b832b202a48f8a9ba66ac0d87989443..e34a3cb6aad6cb078c0801efc6891ba83f5db155 100644 (file)
@@ -911,9 +911,9 @@ size_t iov_iter_single_seg_count(const struct iov_iter *i)
        if (i->nr_segs == 1)
                return i->count;
        else if (i->type & ITER_BVEC)
-               return min(i->count, i->iov->iov_len - i->iov_offset);
-       else
                return min(i->count, i->bvec->bv_len - i->iov_offset);
+       else
+               return min(i->count, i->iov->iov_len - i->iov_offset);
 }
 EXPORT_SYMBOL(iov_iter_single_seg_count);
 
index 252e1dbbed86e9a81011ac8d135d9580969a1141..1bf4807cb21e49ccbd1bb4232574507e8d41384c 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/stop_machine.h>
 #include <linux/hugetlb.h>
 #include <linux/memblock.h>
+#include <linux/bootmem.h>
 
 #include <asm/tlbflush.h>
 
@@ -1066,6 +1067,16 @@ out:
 }
 #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
 
+static void reset_node_present_pages(pg_data_t *pgdat)
+{
+       struct zone *z;
+
+       for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
+               z->present_pages = 0;
+
+       pgdat->node_present_pages = 0;
+}
+
 /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
 static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 {
@@ -1096,6 +1107,21 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
        build_all_zonelists(pgdat, NULL);
        mutex_unlock(&zonelists_mutex);
 
+       /*
+        * zone->managed_pages is set to an approximate value in
+        * free_area_init_core(), which will cause
+        * /sys/device/system/node/nodeX/meminfo has wrong data.
+        * So reset it to 0 before any memory is onlined.
+        */
+       reset_node_managed_pages(pgdat);
+
+       /*
+        * When memory is hot-added, all the memory is in offline state. So
+        * clear all zones' present_pages because they will be updated in
+        * online_pages() and offline_pages().
+        */
+       reset_node_present_pages(pgdat);
+
        return pgdat;
 }
 
index 7c7ab32ee5032dad07354f438b5832649aaa044b..90b50468333e38563d4388096e584b6c23fa9132 100644 (file)
@@ -145,12 +145,10 @@ static unsigned long __init free_low_memory_core_early(void)
 
 static int reset_managed_pages_done __initdata;
 
-static inline void __init reset_node_managed_pages(pg_data_t *pgdat)
+void reset_node_managed_pages(pg_data_t *pgdat)
 {
        struct zone *z;
 
-       if (reset_managed_pages_done)
-               return;
        for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
                z->managed_pages = 0;
 }
@@ -159,8 +157,12 @@ void __init reset_all_zones_managed_pages(void)
 {
        struct pglist_data *pgdat;
 
+       if (reset_managed_pages_done)
+               return;
+
        for_each_online_pgdat(pgdat)
                reset_node_managed_pages(pgdat);
+
        reset_managed_pages_done = 1;
 }
 
index 9cd36b822444433539fbe0cc3acf8f312172345d..616a2c956b4b2a6aee5cc1f7d0098cf4a4cd5912 100644 (file)
@@ -466,29 +466,6 @@ static inline void rmv_page_order(struct page *page)
        set_page_private(page, 0);
 }
 
-/*
- * Locate the struct page for both the matching buddy in our
- * pair (buddy1) and the combined O(n+1) page they form (page).
- *
- * 1) Any buddy B1 will have an order O twin B2 which satisfies
- * the following equation:
- *     B2 = B1 ^ (1 << O)
- * For example, if the starting buddy (buddy2) is #8 its order
- * 1 buddy is #10:
- *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
- *
- * 2) Any buddy B will have an order O+1 parent P which
- * satisfies the following equation:
- *     P = B & ~(1 << O)
- *
- * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
- */
-static inline unsigned long
-__find_buddy_index(unsigned long page_idx, unsigned int order)
-{
-       return page_idx ^ (1 << order);
-}
-
 /*
  * This function checks whether a page is free && is the buddy
  * we can do coalesce a page and its buddy if
@@ -569,6 +546,7 @@ static inline void __free_one_page(struct page *page,
        unsigned long combined_idx;
        unsigned long uninitialized_var(buddy_idx);
        struct page *buddy;
+       int max_order = MAX_ORDER;
 
        VM_BUG_ON(!zone_is_initialized(zone));
 
@@ -577,13 +555,24 @@ static inline void __free_one_page(struct page *page,
                        return;
 
        VM_BUG_ON(migratetype == -1);
+       if (is_migrate_isolate(migratetype)) {
+               /*
+                * We restrict max order of merging to prevent merge
+                * between freepages on isolate pageblock and normal
+                * pageblock. Without this, pageblock isolation
+                * could cause incorrect freepage accounting.
+                */
+               max_order = min(MAX_ORDER, pageblock_order + 1);
+       } else {
+               __mod_zone_freepage_state(zone, 1 << order, migratetype);
+       }
 
-       page_idx = pfn & ((1 << MAX_ORDER) - 1);
+       page_idx = pfn & ((1 << max_order) - 1);
 
        VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
        VM_BUG_ON_PAGE(bad_range(zone, page), page);
 
-       while (order < MAX_ORDER-1) {
+       while (order < max_order - 1) {
                buddy_idx = __find_buddy_index(page_idx, order);
                buddy = page + (buddy_idx - page_idx);
                if (!page_is_buddy(page, buddy, order))
@@ -594,9 +583,11 @@ static inline void __free_one_page(struct page *page,
                 */
                if (page_is_guard(buddy)) {
                        clear_page_guard_flag(buddy);
-                       set_page_private(page, 0);
-                       __mod_zone_freepage_state(zone, 1 << order,
-                                                 migratetype);
+                       set_page_private(buddy, 0);
+                       if (!is_migrate_isolate(migratetype)) {
+                               __mod_zone_freepage_state(zone, 1 << order,
+                                                         migratetype);
+                       }
                } else {
                        list_del(&buddy->lru);
                        zone->free_area[order].nr_free--;
@@ -715,14 +706,12 @@ static void free_pcppages_bulk(struct zone *zone, int count,
                        /* must delete as __free_one_page list manipulates */
                        list_del(&page->lru);
                        mt = get_freepage_migratetype(page);
+                       if (unlikely(has_isolate_pageblock(zone)))
+                               mt = get_pageblock_migratetype(page);
+
                        /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
                        __free_one_page(page, page_to_pfn(page), zone, 0, mt);
                        trace_mm_page_pcpu_drain(page, 0, mt);
-                       if (likely(!is_migrate_isolate_page(page))) {
-                               __mod_zone_page_state(zone, NR_FREE_PAGES, 1);
-                               if (is_migrate_cma(mt))
-                                       __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1);
-                       }
                } while (--to_free && --batch_free && !list_empty(list));
        }
        spin_unlock(&zone->lock);
@@ -739,9 +728,11 @@ static void free_one_page(struct zone *zone,
        if (nr_scanned)
                __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
 
+       if (unlikely(has_isolate_pageblock(zone) ||
+               is_migrate_isolate(migratetype))) {
+               migratetype = get_pfnblock_migratetype(page, pfn);
+       }
        __free_one_page(page, pfn, zone, order, migratetype);
-       if (unlikely(!is_migrate_isolate(migratetype)))
-               __mod_zone_freepage_state(zone, 1 << order, migratetype);
        spin_unlock(&zone->lock);
 }
 
@@ -1484,7 +1475,7 @@ void split_page(struct page *page, unsigned int order)
 }
 EXPORT_SYMBOL_GPL(split_page);
 
-static int __isolate_free_page(struct page *page, unsigned int order)
+int __isolate_free_page(struct page *page, unsigned int order)
 {
        unsigned long watermark;
        struct zone *zone;
@@ -6408,13 +6399,12 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 
        /* Make sure the range is really isolated. */
        if (test_pages_isolated(outer_start, end, false)) {
-               pr_warn("alloc_contig_range test_pages_isolated(%lx, %lx) failed\n",
-                      outer_start, end);
+               pr_info("%s: [%lx, %lx) PFNs busy\n",
+                       __func__, outer_start, end);
                ret = -EBUSY;
                goto done;
        }
 
-
        /* Grab isolated pages from freelists. */
        outer_end = isolate_freepages_range(&cc, outer_start, end);
        if (!outer_end) {
index d1473b2e9481731988695755a618baa0991556a7..c8778f7e208e8a4a640e2c12f091956f4aa33575 100644 (file)
@@ -60,6 +60,7 @@ out:
                int migratetype = get_pageblock_migratetype(page);
 
                set_pageblock_migratetype(page, MIGRATE_ISOLATE);
+               zone->nr_isolate_pageblock++;
                nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);
 
                __mod_zone_freepage_state(zone, -nr_pages, migratetype);
@@ -75,16 +76,54 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 {
        struct zone *zone;
        unsigned long flags, nr_pages;
+       struct page *isolated_page = NULL;
+       unsigned int order;
+       unsigned long page_idx, buddy_idx;
+       struct page *buddy;
 
        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                goto out;
-       nr_pages = move_freepages_block(zone, page, migratetype);
-       __mod_zone_freepage_state(zone, nr_pages, migratetype);
+
+       /*
+        * Because freepage with more than pageblock_order on isolated
+        * pageblock is restricted to merge due to freepage counting problem,
+        * it is possible that there is free buddy page.
+        * move_freepages_block() doesn't care of merge so we need other
+        * approach in order to merge them. Isolation and free will make
+        * these pages to be merged.
+        */
+       if (PageBuddy(page)) {
+               order = page_order(page);
+               if (order >= pageblock_order) {
+                       page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
+                       buddy_idx = __find_buddy_index(page_idx, order);
+                       buddy = page + (buddy_idx - page_idx);
+
+                       if (!is_migrate_isolate_page(buddy)) {
+                               __isolate_free_page(page, order);
+                               set_page_refcounted(page);
+                               isolated_page = page;
+                       }
+               }
+       }
+
+       /*
+        * If we isolate freepage with more than pageblock_order, there
+        * should be no freepage in the range, so we could avoid costly
+        * pageblock scanning for freepage moving.
+        */
+       if (!isolated_page) {
+               nr_pages = move_freepages_block(zone, page, migratetype);
+               __mod_zone_freepage_state(zone, nr_pages, migratetype);
+       }
        set_pageblock_migratetype(page, migratetype);
+       zone->nr_isolate_pageblock--;
 out:
        spin_unlock_irqrestore(&zone->lock, flags);
+       if (isolated_page)
+               __free_pages(isolated_page, order);
 }
 
 static inline struct page *
index 406944207b61dbd607bc7f2d2b244b6998f47254..dcdab81bd240bafe3bec02cb32bf3390763a90e9 100644 (file)
@@ -259,6 +259,10 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
                if (s->size - size >= sizeof(void *))
                        continue;
 
+               if (IS_ENABLED(CONFIG_SLAB) && align &&
+                       (align > s->align || s->align % align))
+                       continue;
+
                return s;
        }
        return NULL;
index 654c9018e3e75a5f0bd01ba2197a7777e8626bf5..48da2c54a69e36aa1ae2f1d9e84cdb82e49a84f3 100644 (file)
@@ -18,6 +18,7 @@
 #include <net/netfilter/ipv6/nf_reject.h>
 #include <linux/ip.h>
 #include <net/ip.h>
+#include <net/ip6_checksum.h>
 #include <linux/netfilter_bridge.h>
 #include "../br_private.h"
 
index 62fc5e7a9acf7506eba2de7ae314ba6067870ceb..790fe89d90c0ac49301bfcc81ba1b6633b9559cd 100644 (file)
@@ -90,11 +90,82 @@ static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)
 
 static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
 
+/*
+ * Should be used for buffers allocated with ceph_kvmalloc().
+ * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
+ * in-buffer (msg front).
+ *
+ * Dispose of @sgt with teardown_sgtable().
+ *
+ * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
+ * in cases where a single sg is sufficient.  No attempt to reduce the
+ * number of sgs by squeezing physically contiguous pages together is
+ * made though, for simplicity.
+ */
+static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
+                        const void *buf, unsigned int buf_len)
+{
+       struct scatterlist *sg;
+       const bool is_vmalloc = is_vmalloc_addr(buf);
+       unsigned int off = offset_in_page(buf);
+       unsigned int chunk_cnt = 1;
+       unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
+       int i;
+       int ret;
+
+       if (buf_len == 0) {
+               memset(sgt, 0, sizeof(*sgt));
+               return -EINVAL;
+       }
+
+       if (is_vmalloc) {
+               chunk_cnt = chunk_len >> PAGE_SHIFT;
+               chunk_len = PAGE_SIZE;
+       }
+
+       if (chunk_cnt > 1) {
+               ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
+               if (ret)
+                       return ret;
+       } else {
+               WARN_ON(chunk_cnt != 1);
+               sg_init_table(prealloc_sg, 1);
+               sgt->sgl = prealloc_sg;
+               sgt->nents = sgt->orig_nents = 1;
+       }
+
+       for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+               struct page *page;
+               unsigned int len = min(chunk_len - off, buf_len);
+
+               if (is_vmalloc)
+                       page = vmalloc_to_page(buf);
+               else
+                       page = virt_to_page(buf);
+
+               sg_set_page(sg, page, len, off);
+
+               off = 0;
+               buf += len;
+               buf_len -= len;
+       }
+       WARN_ON(buf_len != 0);
+
+       return 0;
+}
+
+static void teardown_sgtable(struct sg_table *sgt)
+{
+       if (sgt->orig_nents > 1)
+               sg_free_table(sgt);
+}
+
 static int ceph_aes_encrypt(const void *key, int key_len,
                            void *dst, size_t *dst_len,
                            const void *src, size_t src_len)
 {
-       struct scatterlist sg_in[2], sg_out[1];
+       struct scatterlist sg_in[2], prealloc_sg;
+       struct sg_table sg_out;
        struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
        struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
        int ret;
@@ -110,16 +181,18 @@ static int ceph_aes_encrypt(const void *key, int key_len,
 
        *dst_len = src_len + zero_padding;
 
-       crypto_blkcipher_setkey((void *)tfm, key, key_len);
        sg_init_table(sg_in, 2);
        sg_set_buf(&sg_in[0], src, src_len);
        sg_set_buf(&sg_in[1], pad, zero_padding);
-       sg_init_table(sg_out, 1);
-       sg_set_buf(sg_out, dst, *dst_len);
+       ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
+       if (ret)
+               goto out_tfm;
+
+       crypto_blkcipher_setkey((void *)tfm, key, key_len);
        iv = crypto_blkcipher_crt(tfm)->iv;
        ivsize = crypto_blkcipher_ivsize(tfm);
-
        memcpy(iv, aes_iv, ivsize);
+
        /*
        print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
                       key, key_len, 1);
@@ -128,16 +201,22 @@ static int ceph_aes_encrypt(const void *key, int key_len,
        print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
                        pad, zero_padding, 1);
        */
-       ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
+       ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
                                     src_len + zero_padding);
-       crypto_free_blkcipher(tfm);
-       if (ret < 0)
+       if (ret < 0) {
                pr_err("ceph_aes_crypt failed %d\n", ret);
+               goto out_sg;
+       }
        /*
        print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
                       dst, *dst_len, 1);
        */
-       return 0;
+
+out_sg:
+       teardown_sgtable(&sg_out);
+out_tfm:
+       crypto_free_blkcipher(tfm);
+       return ret;
 }
 
 static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
@@ -145,7 +224,8 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
                             const void *src1, size_t src1_len,
                             const void *src2, size_t src2_len)
 {
-       struct scatterlist sg_in[3], sg_out[1];
+       struct scatterlist sg_in[3], prealloc_sg;
+       struct sg_table sg_out;
        struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
        struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
        int ret;
@@ -161,17 +241,19 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
 
        *dst_len = src1_len + src2_len + zero_padding;
 
-       crypto_blkcipher_setkey((void *)tfm, key, key_len);
        sg_init_table(sg_in, 3);
        sg_set_buf(&sg_in[0], src1, src1_len);
        sg_set_buf(&sg_in[1], src2, src2_len);
        sg_set_buf(&sg_in[2], pad, zero_padding);
-       sg_init_table(sg_out, 1);
-       sg_set_buf(sg_out, dst, *dst_len);
+       ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
+       if (ret)
+               goto out_tfm;
+
+       crypto_blkcipher_setkey((void *)tfm, key, key_len);
        iv = crypto_blkcipher_crt(tfm)->iv;
        ivsize = crypto_blkcipher_ivsize(tfm);
-
        memcpy(iv, aes_iv, ivsize);
+
        /*
        print_hex_dump(KERN_ERR, "enc  key: ", DUMP_PREFIX_NONE, 16, 1,
                       key, key_len, 1);
@@ -182,23 +264,30 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
        print_hex_dump(KERN_ERR, "enc  pad: ", DUMP_PREFIX_NONE, 16, 1,
                        pad, zero_padding, 1);
        */
-       ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
+       ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
                                     src1_len + src2_len + zero_padding);
-       crypto_free_blkcipher(tfm);
-       if (ret < 0)
+       if (ret < 0) {
                pr_err("ceph_aes_crypt2 failed %d\n", ret);
+               goto out_sg;
+       }
        /*
        print_hex_dump(KERN_ERR, "enc  out: ", DUMP_PREFIX_NONE, 16, 1,
                       dst, *dst_len, 1);
        */
-       return 0;
+
+out_sg:
+       teardown_sgtable(&sg_out);
+out_tfm:
+       crypto_free_blkcipher(tfm);
+       return ret;
 }
 
 static int ceph_aes_decrypt(const void *key, int key_len,
                            void *dst, size_t *dst_len,
                            const void *src, size_t src_len)
 {
-       struct scatterlist sg_in[1], sg_out[2];
+       struct sg_table sg_in;
+       struct scatterlist sg_out[2], prealloc_sg;
        struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
        struct blkcipher_desc desc = { .tfm = tfm };
        char pad[16];
@@ -210,16 +299,16 @@ static int ceph_aes_decrypt(const void *key, int key_len,
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
 
-       crypto_blkcipher_setkey((void *)tfm, key, key_len);
-       sg_init_table(sg_in, 1);
        sg_init_table(sg_out, 2);
-       sg_set_buf(sg_in, src, src_len);
        sg_set_buf(&sg_out[0], dst, *dst_len);
        sg_set_buf(&sg_out[1], pad, sizeof(pad));
+       ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
+       if (ret)
+               goto out_tfm;
 
+       crypto_blkcipher_setkey((void *)tfm, key, key_len);
        iv = crypto_blkcipher_crt(tfm)->iv;
        ivsize = crypto_blkcipher_ivsize(tfm);
-
        memcpy(iv, aes_iv, ivsize);
 
        /*
@@ -228,12 +317,10 @@ static int ceph_aes_decrypt(const void *key, int key_len,
        print_hex_dump(KERN_ERR, "dec  in: ", DUMP_PREFIX_NONE, 16, 1,
                       src, src_len, 1);
        */
-
-       ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
-       crypto_free_blkcipher(tfm);
+       ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
        if (ret < 0) {
                pr_err("ceph_aes_decrypt failed %d\n", ret);
-               return ret;
+               goto out_sg;
        }
 
        if (src_len <= *dst_len)
@@ -251,7 +338,12 @@ static int ceph_aes_decrypt(const void *key, int key_len,
        print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
                       dst, *dst_len, 1);
        */
-       return 0;
+
+out_sg:
+       teardown_sgtable(&sg_in);
+out_tfm:
+       crypto_free_blkcipher(tfm);
+       return ret;
 }
 
 static int ceph_aes_decrypt2(const void *key, int key_len,
@@ -259,7 +351,8 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
                             void *dst2, size_t *dst2_len,
                             const void *src, size_t src_len)
 {
-       struct scatterlist sg_in[1], sg_out[3];
+       struct sg_table sg_in;
+       struct scatterlist sg_out[3], prealloc_sg;
        struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
        struct blkcipher_desc desc = { .tfm = tfm };
        char pad[16];
@@ -271,17 +364,17 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
 
-       sg_init_table(sg_in, 1);
-       sg_set_buf(sg_in, src, src_len);
        sg_init_table(sg_out, 3);
        sg_set_buf(&sg_out[0], dst1, *dst1_len);
        sg_set_buf(&sg_out[1], dst2, *dst2_len);
        sg_set_buf(&sg_out[2], pad, sizeof(pad));
+       ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
+       if (ret)
+               goto out_tfm;
 
        crypto_blkcipher_setkey((void *)tfm, key, key_len);
        iv = crypto_blkcipher_crt(tfm)->iv;
        ivsize = crypto_blkcipher_ivsize(tfm);
-
        memcpy(iv, aes_iv, ivsize);
 
        /*
@@ -290,12 +383,10 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
        print_hex_dump(KERN_ERR, "dec   in: ", DUMP_PREFIX_NONE, 16, 1,
                       src, src_len, 1);
        */
-
-       ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
-       crypto_free_blkcipher(tfm);
+       ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
        if (ret < 0) {
                pr_err("ceph_aes_decrypt failed %d\n", ret);
-               return ret;
+               goto out_sg;
        }
 
        if (src_len <= *dst1_len)
@@ -325,7 +416,11 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
                       dst2, *dst2_len, 1);
        */
 
-       return 0;
+out_sg:
+       teardown_sgtable(&sg_in);
+out_tfm:
+       crypto_free_blkcipher(tfm);
+       return ret;
 }
 
 
index f3fc54eac09d32e691a64b5d65ba90793cba96b8..6f164289bde8860e333c747934da5459e807d7eb 100644 (file)
@@ -1007,8 +1007,8 @@ static void put_osd(struct ceph_osd *osd)
 static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
 {
        dout("__remove_osd %p\n", osd);
-       BUG_ON(!list_empty(&osd->o_requests));
-       BUG_ON(!list_empty(&osd->o_linger_requests));
+       WARN_ON(!list_empty(&osd->o_requests));
+       WARN_ON(!list_empty(&osd->o_linger_requests));
 
        rb_erase(&osd->o_node, &osdc->osds);
        list_del_init(&osd->o_osd_lru);
@@ -1254,6 +1254,8 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
                if (list_empty(&req->r_osd_item))
                        req->r_osd = NULL;
        }
+
+       list_del_init(&req->r_req_lru_item); /* can be on notarget */
        ceph_osdc_put_request(req);
 }
 
@@ -1395,6 +1397,7 @@ static int __map_request(struct ceph_osd_client *osdc,
        if (req->r_osd) {
                __cancel_request(req);
                list_del_init(&req->r_osd_item);
+               list_del_init(&req->r_linger_osd_item);
                req->r_osd = NULL;
        }
 
index 6d1817449c3675bb4ddd6d47a16ea017a5213e0d..ab03e00ffe8f0e2c4095e4f8c33d79c3e6d069da 100644 (file)
@@ -489,11 +489,14 @@ static void dsa_slave_phy_setup(struct dsa_slave_priv *p,
        /* We could not connect to a designated PHY, so use the switch internal
         * MDIO bus instead
         */
-       if (!p->phy)
+       if (!p->phy) {
                p->phy = ds->slave_mii_bus->phy_map[p->port];
-       else
+               phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
+                                  p->phy_interface);
+       } else {
                pr_info("attached PHY at address %d [%s]\n",
                        p->phy->addr, p->phy->drv->name);
+       }
 }
 
 int dsa_slave_suspend(struct net_device *slave_dev)
index 32e78924e246bb7f89ad8a7b7a722e04fd879d0e..606c520ffd5af44dd79126ce119945e1f8a070ce 100644 (file)
@@ -133,6 +133,8 @@ static int fou_gro_complete(struct sk_buff *skb, int nhoff)
        int err = -ENOSYS;
        const struct net_offload **offloads;
 
+       udp_tunnel_gro_complete(skb, nhoff);
+
        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[proto]);
index 065cd94c640c0c2a20cbf99c31cba588db54897e..dedb21e9991438259ff66a4e4817349dfd9d3224 100644 (file)
@@ -144,6 +144,8 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
        gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
        geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
 
+       skb_set_inner_protocol(skb, htons(ETH_P_TEB));
+
        return udp_tunnel_xmit_skb(gs->sock, rt, skb, src, dst,
                                   tos, ttl, df, src_port, dst_port, xnet);
 }
@@ -364,6 +366,7 @@ late_initcall(geneve_init_module);
 static void __exit geneve_cleanup_module(void)
 {
        destroy_workqueue(geneve_wq);
+       unregister_pernet_subsys(&geneve_net_ops);
 }
 module_exit(geneve_cleanup_module);
 
index c373a9ad45556815d1d95f4faf71b8f8a3f61996..9daf2177dc005c339c536c618fedc11259c178bc 100644 (file)
@@ -195,7 +195,7 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
        for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
                if (!CMSG_OK(msg, cmsg))
                        return -EINVAL;
-#if defined(CONFIG_IPV6)
+#if IS_ENABLED(CONFIG_IPV6)
                if (allow_ipv6 &&
                    cmsg->cmsg_level == SOL_IPV6 &&
                    cmsg->cmsg_type == IPV6_PKTINFO) {
index a12b455928e52211efdc6b471ef54de6218f5df0..88fa2d1606859de25419d0d45c3095f6d410d42b 100644 (file)
@@ -2315,6 +2315,35 @@ static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
 
 /* Undo procedures. */
 
+/* We can clear retrans_stamp when there are no retransmissions in the
+ * window. It would seem that it is trivially available for us in
+ * tp->retrans_out, however, that kind of assumptions doesn't consider
+ * what will happen if errors occur when sending retransmission for the
+ * second time. ...It could the that such segment has only
+ * TCPCB_EVER_RETRANS set at the present time. It seems that checking
+ * the head skb is enough except for some reneging corner cases that
+ * are not worth the effort.
+ *
+ * Main reason for all this complexity is the fact that connection dying
+ * time now depends on the validity of the retrans_stamp, in particular,
+ * that successive retransmissions of a segment must not advance
+ * retrans_stamp under any conditions.
+ */
+static bool tcp_any_retrans_done(const struct sock *sk)
+{
+       const struct tcp_sock *tp = tcp_sk(sk);
+       struct sk_buff *skb;
+
+       if (tp->retrans_out)
+               return true;
+
+       skb = tcp_write_queue_head(sk);
+       if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
+               return true;
+
+       return false;
+}
+
 #if FASTRETRANS_DEBUG > 1
 static void DBGUNDO(struct sock *sk, const char *msg)
 {
@@ -2410,6 +2439,8 @@ static bool tcp_try_undo_recovery(struct sock *sk)
                 * is ACKed. For Reno it is MUST to prevent false
                 * fast retransmits (RFC2582). SACK TCP is safe. */
                tcp_moderate_cwnd(tp);
+               if (!tcp_any_retrans_done(sk))
+                       tp->retrans_stamp = 0;
                return true;
        }
        tcp_set_ca_state(sk, TCP_CA_Open);
@@ -2430,35 +2461,6 @@ static bool tcp_try_undo_dsack(struct sock *sk)
        return false;
 }
 
-/* We can clear retrans_stamp when there are no retransmissions in the
- * window. It would seem that it is trivially available for us in
- * tp->retrans_out, however, that kind of assumptions doesn't consider
- * what will happen if errors occur when sending retransmission for the
- * second time. ...It could the that such segment has only
- * TCPCB_EVER_RETRANS set at the present time. It seems that checking
- * the head skb is enough except for some reneging corner cases that
- * are not worth the effort.
- *
- * Main reason for all this complexity is the fact that connection dying
- * time now depends on the validity of the retrans_stamp, in particular,
- * that successive retransmissions of a segment must not advance
- * retrans_stamp under any conditions.
- */
-static bool tcp_any_retrans_done(const struct sock *sk)
-{
-       const struct tcp_sock *tp = tcp_sk(sk);
-       struct sk_buff *skb;
-
-       if (tp->retrans_out)
-               return true;
-
-       skb = tcp_write_queue_head(sk);
-       if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
-               return true;
-
-       return false;
-}
-
 /* Undo during loss recovery after partial ACK or using F-RTO. */
 static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
 {
index 12c3c8ef3849467296ced452089008872e64083f..4564e1fca3eb42ab23c8370069417a456cdc76eb 100644 (file)
@@ -961,8 +961,6 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
        else
                dev->flags &= ~IFF_POINTOPOINT;
 
-       dev->iflink = p->link;
-
        /* Precalculate GRE options length */
        if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
                if (t->parms.o_flags&GRE_CSUM)
@@ -1272,6 +1270,7 @@ static int ip6gre_tunnel_init(struct net_device *dev)
                u64_stats_init(&ip6gre_tunnel_stats->syncp);
        }
 
+       dev->iflink = tunnel->parms.link;
 
        return 0;
 }
@@ -1481,6 +1480,8 @@ static int ip6gre_tap_init(struct net_device *dev)
        if (!dev->tstats)
                return -ENOMEM;
 
+       dev->iflink = tunnel->parms.link;
+
        return 0;
 }
 
index 9409887fb664dd78d13937b89ea1b5044afdbc94..9cb94cfa0ae71d05a5bd924e58eb225e21dbb62d 100644 (file)
@@ -272,9 +272,6 @@ static int ip6_tnl_create2(struct net_device *dev)
        int err;
 
        t = netdev_priv(dev);
-       err = ip6_tnl_dev_init(dev);
-       if (err < 0)
-               goto out;
 
        err = register_netdevice(dev);
        if (err < 0)
@@ -1462,6 +1459,7 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
 
 
 static const struct net_device_ops ip6_tnl_netdev_ops = {
+       .ndo_init       = ip6_tnl_dev_init,
        .ndo_uninit     = ip6_tnl_dev_uninit,
        .ndo_start_xmit = ip6_tnl_xmit,
        .ndo_do_ioctl   = ip6_tnl_ioctl,
@@ -1546,16 +1544,10 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
        struct ip6_tnl *t = netdev_priv(dev);
        struct net *net = dev_net(dev);
        struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
-       int err = ip6_tnl_dev_init_gen(dev);
-
-       if (err)
-               return err;
 
        t->parms.proto = IPPROTO_IPV6;
        dev_hold(dev);
 
-       ip6_tnl_link_config(t);
-
        rcu_assign_pointer(ip6n->tnls_wc[0], t);
        return 0;
 }
index d440bb585524d72202f809ebd1f0cb4ee8b7cc79..31089d153fd332136fcb9f89305ad7ab5bfb214d 100644 (file)
@@ -172,10 +172,6 @@ static int vti6_tnl_create2(struct net_device *dev)
        struct vti6_net *ip6n = net_generic(net, vti6_net_id);
        int err;
 
-       err = vti6_dev_init(dev);
-       if (err < 0)
-               goto out;
-
        err = register_netdevice(dev);
        if (err < 0)
                goto out;
@@ -783,6 +779,7 @@ static int vti6_change_mtu(struct net_device *dev, int new_mtu)
 }
 
 static const struct net_device_ops vti6_netdev_ops = {
+       .ndo_init       = vti6_dev_init,
        .ndo_uninit     = vti6_dev_uninit,
        .ndo_start_xmit = vti6_tnl_xmit,
        .ndo_do_ioctl   = vti6_ioctl,
@@ -852,16 +849,10 @@ static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev)
        struct ip6_tnl *t = netdev_priv(dev);
        struct net *net = dev_net(dev);
        struct vti6_net *ip6n = net_generic(net, vti6_net_id);
-       int err = vti6_dev_init_gen(dev);
-
-       if (err)
-               return err;
 
        t->parms.proto = IPPROTO_IPV6;
        dev_hold(dev);
 
-       vti6_link_config(t);
-
        rcu_assign_pointer(ip6n->tnls_wc[0], t);
        return 0;
 }
index 58e5b4710127a996e31b9d2faa97feafc611e39f..a24557a1c1d8b6da7ee51e862ae4b847d9e744d6 100644 (file)
@@ -195,10 +195,8 @@ static int ipip6_tunnel_create(struct net_device *dev)
        struct sit_net *sitn = net_generic(net, sit_net_id);
        int err;
 
-       err = ipip6_tunnel_init(dev);
-       if (err < 0)
-               goto out;
-       ipip6_tunnel_clone_6rd(dev, sitn);
+       memcpy(dev->dev_addr, &t->parms.iph.saddr, 4);
+       memcpy(dev->broadcast, &t->parms.iph.daddr, 4);
 
        if ((__force u16)t->parms.i_flags & SIT_ISATAP)
                dev->priv_flags |= IFF_ISATAP;
@@ -207,7 +205,8 @@ static int ipip6_tunnel_create(struct net_device *dev)
        if (err < 0)
                goto out;
 
-       strcpy(t->parms.name, dev->name);
+       ipip6_tunnel_clone_6rd(dev, sitn);
+
        dev->rtnl_link_ops = &sit_link_ops;
 
        dev_hold(dev);
@@ -1330,6 +1329,7 @@ static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu)
 }
 
 static const struct net_device_ops ipip6_netdev_ops = {
+       .ndo_init       = ipip6_tunnel_init,
        .ndo_uninit     = ipip6_tunnel_uninit,
        .ndo_start_xmit = sit_tunnel_xmit,
        .ndo_do_ioctl   = ipip6_tunnel_ioctl,
@@ -1378,9 +1378,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
 
        tunnel->dev = dev;
        tunnel->net = dev_net(dev);
-
-       memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
-       memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
+       strcpy(tunnel->parms.name, dev->name);
 
        ipip6_tunnel_bind_dev(dev);
        dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
@@ -1405,7 +1403,6 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
 
        tunnel->dev = dev;
        tunnel->net = dev_net(dev);
-       strcpy(tunnel->parms.name, dev->name);
 
        iph->version            = 4;
        iph->protocol           = IPPROTO_IPV6;
index 56b53571c8077ae5a3660449cb0afeb81375028c..509bc157ce5551700db59e380d180c486b44f097 100644 (file)
@@ -805,7 +805,7 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
 
        memset(&params, 0, sizeof(params));
        memset(&csa_ie, 0, sizeof(csa_ie));
-       err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon,
+       err = ieee80211_parse_ch_switch_ie(sdata, elems,
                                           ifibss->chandef.chan->band,
                                           sta_flags, ifibss->bssid, &csa_ie);
        /* can't switch to destination channel, fail */
index c2aaec4dfcf0ea38da3e45cfdc7b8ad0ecef5fc8..8c68da30595df7793545200c9ce102ae2d5e612d 100644 (file)
@@ -1642,7 +1642,6 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
  * ieee80211_parse_ch_switch_ie - parses channel switch IEs
  * @sdata: the sdata of the interface which has received the frame
  * @elems: parsed 802.11 elements received with the frame
- * @beacon: indicates if the frame was a beacon or probe response
  * @current_band: indicates the current band
  * @sta_flags: contains information about own capabilities and restrictions
  *     to decide which channel switch announcements can be accepted. Only the
@@ -1656,7 +1655,7 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
  * Return: 0 on success, <0 on error and >0 if there is nothing to parse.
  */
 int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
-                                struct ieee802_11_elems *elems, bool beacon,
+                                struct ieee802_11_elems *elems,
                                 enum ieee80211_band current_band,
                                 u32 sta_flags, u8 *bssid,
                                 struct ieee80211_csa_ie *csa_ie);
index af237223a8cd9bd3aa6121576650238d42e3bbb0..653f5eb07a27f4432429a8fbbc4b32e7200cf4c3 100644 (file)
@@ -766,10 +766,12 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
        int i, flushed;
        struct ps_data *ps;
        struct cfg80211_chan_def chandef;
+       bool cancel_scan;
 
        clear_bit(SDATA_STATE_RUNNING, &sdata->state);
 
-       if (rcu_access_pointer(local->scan_sdata) == sdata)
+       cancel_scan = rcu_access_pointer(local->scan_sdata) == sdata;
+       if (cancel_scan)
                ieee80211_scan_cancel(local);
 
        /*
@@ -898,6 +900,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                list_del(&sdata->u.vlan.list);
                mutex_unlock(&local->mtx);
                RCU_INIT_POINTER(sdata->vif.chanctx_conf, NULL);
+               /* see comment in the default case below */
+               ieee80211_free_keys(sdata, true);
                /* no need to tell driver */
                break;
        case NL80211_IFTYPE_MONITOR:
@@ -923,17 +927,16 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                /*
                 * When we get here, the interface is marked down.
                 * Free the remaining keys, if there are any
-                * (shouldn't be, except maybe in WDS mode?)
+                * (which can happen in AP mode if userspace sets
+                * keys before the interface is operating, and maybe
+                * also in WDS mode)
                 *
                 * Force the key freeing to always synchronize_net()
                 * to wait for the RX path in case it is using this
-                * interface enqueuing frames at this very time on
+                * interface enqueuing frames at this very time on
                 * another CPU.
                 */
                ieee80211_free_keys(sdata, true);
-
-               /* fall through */
-       case NL80211_IFTYPE_AP:
                skb_queue_purge(&sdata->skb_queue);
        }
 
@@ -991,6 +994,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 
        ieee80211_recalc_ps(local, -1);
 
+       if (cancel_scan)
+               flush_delayed_work(&local->scan_work);
+
        if (local->open_count == 0) {
                ieee80211_stop_device(local);
 
index e9f99c1e3fad5905682a61f978d9897833426fd0..0c8b2a77d312d5e3ad18f975ce808c44755c820b 100644 (file)
@@ -874,7 +874,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
 
        memset(&params, 0, sizeof(params));
        memset(&csa_ie, 0, sizeof(csa_ie));
-       err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, band,
+       err = ieee80211_parse_ch_switch_ie(sdata, elems, band,
                                           sta_flags, sdata->vif.addr,
                                           &csa_ie);
        if (err < 0)
index 2de88704278b85b8d9ce47f2ef77905108d456e7..93af0f1c9d991a52d82cbc58bcd842415b22a04a 100644 (file)
@@ -1072,7 +1072,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
 
        current_band = cbss->channel->band;
        memset(&csa_ie, 0, sizeof(csa_ie));
-       res = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, current_band,
+       res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band,
                                           ifmgd->flags,
                                           ifmgd->associated->bssid, &csa_ie);
        if (res < 0)
@@ -1168,7 +1168,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
                ieee80211_queue_work(&local->hw, &ifmgd->chswitch_work);
        else
                mod_timer(&ifmgd->chswitch_timer,
-                         TU_TO_EXP_TIME(csa_ie.count * cbss->beacon_interval));
+                         TU_TO_EXP_TIME((csa_ie.count - 1) *
+                                        cbss->beacon_interval));
 }
 
 static bool
index b04ca4049c95f276aa4627501d5ced01e8a3b878..a37f9af634cb6d3c7bb73ada859473e0e085402d 100644 (file)
@@ -1678,11 +1678,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
        sc = le16_to_cpu(hdr->seq_ctrl);
        frag = sc & IEEE80211_SCTL_FRAG;
 
-       if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
-                  is_multicast_ether_addr(hdr->addr1))) {
-               /* not fragmented */
+       if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
+               goto out;
+
+       if (is_multicast_ether_addr(hdr->addr1)) {
+               rx->local->dot11MulticastReceivedFrameCount++;
                goto out;
        }
+
        I802_DEBUG_INC(rx->local->rx_handlers_fragments);
 
        if (skb_linearize(rx->skb))
@@ -1775,10 +1778,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
  out:
        if (rx->sta)
                rx->sta->rx_packets++;
-       if (is_multicast_ether_addr(hdr->addr1))
-               rx->local->dot11MulticastReceivedFrameCount++;
-       else
-               ieee80211_led_rx(rx->local);
+       ieee80211_led_rx(rx->local);
        return RX_CONTINUE;
 }
 
index 6ab00907008461fb14d551b2633a8dbcd4fef623..efeba56c913bae0d7284bfc74862599a2ad2298b 100644 (file)
@@ -22,7 +22,7 @@
 #include "wme.h"
 
 int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
-                                struct ieee802_11_elems *elems, bool beacon,
+                                struct ieee802_11_elems *elems,
                                 enum ieee80211_band current_band,
                                 u32 sta_flags, u8 *bssid,
                                 struct ieee80211_csa_ie *csa_ie)
@@ -91,19 +91,13 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
                return -EINVAL;
        }
 
-       if (!beacon && sec_chan_offs) {
+       if (sec_chan_offs) {
                secondary_channel_offset = sec_chan_offs->sec_chan_offs;
-       } else if (beacon && ht_oper) {
-               secondary_channel_offset =
-                       ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET;
        } else if (!(sta_flags & IEEE80211_STA_DISABLE_HT)) {
-               /* If it's not a beacon, HT is enabled and the IE not present,
-                * it's 20 MHz, 802.11-2012 8.5.2.6:
-                *      This element [the Secondary Channel Offset Element] is
-                *      present when switching to a 40 MHz channel. It may be
-                *      present when switching to a 20 MHz channel (in which
-                *      case the secondary channel offset is set to SCN).
-                */
+               /* If the secondary channel offset IE is not present,
+                * we can't know what's the post-CSA offset, so the
+                * best we can do is use 20MHz.
+               */
                secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
        }
 
index f1de72de273e20e7422bf6de42ebbca712affacf..0007b818039708bf447d74252712d93f9a8889e0 100644 (file)
@@ -1440,7 +1440,7 @@ static void netlink_unbind(int group, long unsigned int groups,
                return;
 
        for (undo = 0; undo < group; undo++)
-               if (test_bit(group, &groups))
+               if (test_bit(undo, &groups))
                        nlk->netlink_unbind(undo);
 }
 
@@ -1492,7 +1492,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
                        netlink_insert(sk, net, nladdr->nl_pid) :
                        netlink_autobind(sock);
                if (err) {
-                       netlink_unbind(nlk->ngroups - 1, groups, nlk);
+                       netlink_unbind(nlk->ngroups, groups, nlk);
                        return err;
                }
        }
@@ -2509,6 +2509,7 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module,
                nl_table[unit].module = module;
                if (cfg) {
                        nl_table[unit].bind = cfg->bind;
+                       nl_table[unit].unbind = cfg->unbind;
                        nl_table[unit].flags = cfg->flags;
                        if (cfg->compare)
                                nl_table[unit].compare = cfg->compare;
index 0e8529113dc5a009c89bab8be3c87ce0ddbcce49..fb7976aee61c84f38aecdc5c5f0d8be20e577fa9 100644 (file)
@@ -862,8 +862,6 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
                list_add(&cur_key->key_list, sh_keys);
 
        cur_key->key = key;
-       sctp_auth_key_hold(key);
-
        return 0;
 nomem:
        if (!replace)
index ab734be8cb209864910f2fd667f2a6a27266f3af..9f32741abb1c7b142265297dc2fac78b74b3d195 100644 (file)
@@ -2609,6 +2609,9 @@ do_addr_param:
                addr_param = param.v + sizeof(sctp_addip_param_t);
 
                af = sctp_get_af_specific(param_type2af(param.p->type));
+               if (af == NULL)
+                       break;
+
                af->from_addr_param(&addr, addr_param,
                                    htons(asoc->peer.port), 0);
 
index e66314138b3822036968abb52b672dcfc02c73ee..c603b20356ade4ca57c42a0cc7260f945d2c4ff5 100644 (file)
@@ -4725,9 +4725,10 @@ static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
        err = selinux_nlmsg_lookup(sksec->sclass, nlh->nlmsg_type, &perm);
        if (err) {
                if (err == -EINVAL) {
-                       WARN_ONCE(1, "selinux_nlmsg_perm: unrecognized netlink message:"
-                                 " protocol=%hu nlmsg_type=%hu sclass=%hu\n",
-                                 sk->sk_protocol, nlh->nlmsg_type, sksec->sclass);
+                       printk(KERN_WARNING
+                              "SELinux: unrecognized netlink message:"
+                              " protocol=%hu nlmsg_type=%hu sclass=%hu\n",
+                              sk->sk_protocol, nlh->nlmsg_type, sksec->sclass);
                        if (!selinux_enforcing || security_get_allow_unknown())
                                err = 0;
                }
index 9ab1e631cb32244262d9a0cfe5009adfe4677aca..16660f312043a71fac284dd7948baab3491d05f3 100644 (file)
@@ -219,6 +219,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
                         "{Intel, LPT_LP},"
                         "{Intel, WPT_LP},"
                         "{Intel, SPT},"
+                        "{Intel, SPT_LP},"
                         "{Intel, HPT},"
                         "{Intel, PBG},"
                         "{Intel, SCH},"
@@ -2004,6 +2005,9 @@ static const struct pci_device_id azx_ids[] = {
        /* Sunrise Point */
        { PCI_DEVICE(0x8086, 0xa170),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+       /* Sunrise Point-LP */
+       { PCI_DEVICE(0x8086, 0x9d70),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
        /* Haswell */
        { PCI_DEVICE(0x8086, 0x0a0c),
          .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
index 71e4bad06345c856fcfb80e9e75578d444db117e..e9ebc7bd752cae1afdf95c0b2d2e0e8a15d2393b 100644 (file)
@@ -43,6 +43,7 @@ struct conexant_spec {
        unsigned int num_eapds;
        hda_nid_t eapds[4];
        bool dynamic_eapd;
+       hda_nid_t mute_led_eapd;
 
        unsigned int parse_flags; /* flag for snd_hda_parse_pin_defcfg() */
 
@@ -163,6 +164,17 @@ static void cx_auto_vmaster_hook(void *private_data, int enabled)
        cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, enabled);
 }
 
+/* turn on/off EAPD according to Master switch (inversely!) for mute LED */
+static void cx_auto_vmaster_hook_mute_led(void *private_data, int enabled)
+{
+       struct hda_codec *codec = private_data;
+       struct conexant_spec *spec = codec->spec;
+
+       snd_hda_codec_write(codec, spec->mute_led_eapd, 0,
+                           AC_VERB_SET_EAPD_BTLENABLE,
+                           enabled ? 0x00 : 0x02);
+}
+
 static int cx_auto_build_controls(struct hda_codec *codec)
 {
        int err;
@@ -223,6 +235,7 @@ enum {
        CXT_FIXUP_TOSHIBA_P105,
        CXT_FIXUP_HP_530,
        CXT_FIXUP_CAP_MIX_AMP_5047,
+       CXT_FIXUP_MUTE_LED_EAPD,
 };
 
 /* for hda_fixup_thinkpad_acpi() */
@@ -557,6 +570,18 @@ static void cxt_fixup_olpc_xo(struct hda_codec *codec,
        }
 }
 
+static void cxt_fixup_mute_led_eapd(struct hda_codec *codec,
+                                   const struct hda_fixup *fix, int action)
+{
+       struct conexant_spec *spec = codec->spec;
+
+       if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+               spec->mute_led_eapd = 0x1b;
+               spec->dynamic_eapd = 1;
+               spec->gen.vmaster_mute.hook = cx_auto_vmaster_hook_mute_led;
+       }
+}
+
 /*
  * Fix max input level on mixer widget to 0dB
  * (originally it has 0x2b steps with 0dB offset 0x14)
@@ -705,6 +730,10 @@ static const struct hda_fixup cxt_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = cxt_fixup_cap_mix_amp_5047,
        },
+       [CXT_FIXUP_MUTE_LED_EAPD] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = cxt_fixup_mute_led_eapd,
+       },
 };
 
 static const struct snd_pci_quirk cxt5045_fixups[] = {
@@ -762,6 +791,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
        SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410),
        SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT_PINCFG_LENOVO_TP410),
        SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT_PINCFG_LENOVO_TP410),
+       SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo IdeaPad Z560", CXT_FIXUP_MUTE_LED_EAPD),
        SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
@@ -780,6 +810,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
        { .id = CXT_PINCFG_LEMOTE_A1004, .name = "lemote-a1004" },
        { .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" },
        { .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" },
+       { .id = CXT_FIXUP_MUTE_LED_EAPD, .name = "mute-led-eapd" },
        {}
 };
 
index da03693099eb55587261c807f157844a90d98539..172395465e8a63963d096ec29f3ee04f7bfe5948 100644 (file)
@@ -288,6 +288,80 @@ static void alc880_unsol_event(struct hda_codec *codec, unsigned int res)
        snd_hda_jack_unsol_event(codec, res >> 2);
 }
 
+/* Change EAPD to verb control */
+static void alc_fill_eapd_coef(struct hda_codec *codec)
+{
+       int coef;
+
+       coef = alc_get_coef0(codec);
+
+       switch (codec->vendor_id) {
+       case 0x10ec0262:
+               alc_update_coef_idx(codec, 0x7, 0, 1<<5);
+               break;
+       case 0x10ec0267:
+       case 0x10ec0268:
+               alc_update_coef_idx(codec, 0x7, 0, 1<<13);
+               break;
+       case 0x10ec0269:
+               if ((coef & 0x00f0) == 0x0010)
+                       alc_update_coef_idx(codec, 0xd, 0, 1<<14);
+               if ((coef & 0x00f0) == 0x0020)
+                       alc_update_coef_idx(codec, 0x4, 1<<15, 0);
+               if ((coef & 0x00f0) == 0x0030)
+                       alc_update_coef_idx(codec, 0x10, 1<<9, 0);
+               break;
+       case 0x10ec0280:
+       case 0x10ec0284:
+       case 0x10ec0290:
+       case 0x10ec0292:
+               alc_update_coef_idx(codec, 0x4, 1<<15, 0);
+               break;
+       case 0x10ec0233:
+       case 0x10ec0255:
+       case 0x10ec0282:
+       case 0x10ec0283:
+       case 0x10ec0286:
+       case 0x10ec0288:
+               alc_update_coef_idx(codec, 0x10, 1<<9, 0);
+               break;
+       case 0x10ec0285:
+       case 0x10ec0293:
+               alc_update_coef_idx(codec, 0xa, 1<<13, 0);
+               break;
+       case 0x10ec0662:
+               if ((coef & 0x00f0) == 0x0030)
+                       alc_update_coef_idx(codec, 0x4, 1<<10, 0); /* EAPD Ctrl */
+               break;
+       case 0x10ec0272:
+       case 0x10ec0273:
+       case 0x10ec0663:
+       case 0x10ec0665:
+       case 0x10ec0670:
+       case 0x10ec0671:
+       case 0x10ec0672:
+               alc_update_coef_idx(codec, 0xd, 0, 1<<14); /* EAPD Ctrl */
+               break;
+       case 0x10ec0668:
+               alc_update_coef_idx(codec, 0x7, 3<<13, 0);
+               break;
+       case 0x10ec0867:
+               alc_update_coef_idx(codec, 0x4, 1<<10, 0);
+               break;
+       case 0x10ec0888:
+               if ((coef & 0x00f0) == 0x0020 || (coef & 0x00f0) == 0x0030)
+                       alc_update_coef_idx(codec, 0x7, 1<<5, 0);
+               break;
+       case 0x10ec0892:
+               alc_update_coef_idx(codec, 0x7, 1<<5, 0);
+               break;
+       case 0x10ec0899:
+       case 0x10ec0900:
+               alc_update_coef_idx(codec, 0x7, 1<<1, 0);
+               break;
+       }
+}
+
 /* additional initialization for ALC888 variants */
 static void alc888_coef_init(struct hda_codec *codec)
 {
@@ -339,6 +413,7 @@ static void alc_eapd_shutup(struct hda_codec *codec)
 /* generic EAPD initialization */
 static void alc_auto_init_amp(struct hda_codec *codec, int type)
 {
+       alc_fill_eapd_coef(codec);
        alc_auto_setup_eapd(codec, true);
        switch (type) {
        case ALC_INIT_GPIO1:
@@ -5212,9 +5287,6 @@ static void alc269_fill_coef(struct hda_codec *codec)
                }
        }
 
-       /* Class D */
-       alc_update_coef_idx(codec, 0xd, 0, 1<<14);
-
        /* HP */
        alc_update_coef_idx(codec, 0x4, 0, 1<<11);
 }
@@ -6124,29 +6196,6 @@ static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
        {}
 };
 
-static void alc662_fill_coef(struct hda_codec *codec)
-{
-       int coef;
-
-       coef = alc_get_coef0(codec);
-
-       switch (codec->vendor_id) {
-       case 0x10ec0662:
-               if ((coef & 0x00f0) == 0x0030)
-                       alc_update_coef_idx(codec, 0x4, 1<<10, 0); /* EAPD Ctrl */
-               break;
-       case 0x10ec0272:
-       case 0x10ec0273:
-       case 0x10ec0663:
-       case 0x10ec0665:
-       case 0x10ec0670:
-       case 0x10ec0671:
-       case 0x10ec0672:
-               alc_update_coef_idx(codec, 0xd, 0, 1<<14); /* EAPD Ctrl */
-               break;
-       }
-}
-
 /*
  */
 static int patch_alc662(struct hda_codec *codec)
@@ -6169,10 +6218,6 @@ static int patch_alc662(struct hda_codec *codec)
        case 0x10ec0668:
                spec->init_hook = alc668_restore_default_value;
                break;
-       default:
-               spec->init_hook = alc662_fill_coef;
-               alc662_fill_coef(codec);
-               break;
        }
 
        snd_hda_pick_fixup(codec, alc662_fixup_models,
index f119a41ed9a94986cc1ec1ccd99b1aac3c400260..7c83bab69deef832690c98c7babb00ded92d5784 100644 (file)
@@ -885,6 +885,11 @@ static int snd_ftu_eff_switch_put(struct snd_kcontrol *kctl,
        return changed;
 }
 
+static void kctl_private_value_free(struct snd_kcontrol *kctl)
+{
+       kfree((void *)kctl->private_value);
+}
+
 static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer,
        int validx, int bUnitID)
 {
@@ -919,6 +924,7 @@ static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer,
                return -ENOMEM;
        }
 
+       kctl->private_free = kctl_private_value_free;
        err = snd_ctl_add(mixer->chip->card, kctl);
        if (err < 0)
                return err;
index 57b9c2b7c4ffbfb5950af351f9157e97a075caac..6f6733331d9597918353464a71af1e9d9e04a251 100644 (file)
@@ -128,7 +128,7 @@ static int sock_fanout_read_ring(int fd, void *ring)
        struct tpacket2_hdr *header = ring;
        int count = 0;
 
-       while (header->tp_status & TP_STATUS_USER && count < RING_NUM_FRAMES) {
+       while (count < RING_NUM_FRAMES && header->tp_status & TP_STATUS_USER) {
                count++;
                header = ring + (count * getpagesize());
        }