o udev 081 # udevinfo -V
o grub 0.93 # grub --version
o mcelog 0.6
-o iptables 1.4.1 # iptables -V
-
Kernel compilation
==================
###
# The targets that may be used.
-PHONY += xmldocs sgmldocs psdocs pdfdocs htmldocs mandocs installmandocs cleandocs xmldoclinks
+PHONY += xmldocs sgmldocs psdocs pdfdocs htmldocs mandocs installmandocs cleandocs media
BOOKS := $(addprefix $(obj)/,$(DOCBOOKS))
-xmldocs: $(BOOKS) xmldoclinks
+xmldocs: $(BOOKS)
sgmldocs: xmldocs
PS := $(patsubst %.xml, %.ps, $(BOOKS))
pdfdocs: $(PDF)
HTML := $(sort $(patsubst %.xml, %.html, $(BOOKS)))
-htmldocs: $(HTML)
+htmldocs: media $(HTML)
$(call build_main_index)
- $(call build_images)
MAN := $(patsubst %.xml, %.9, $(BOOKS))
mandocs: $(MAN)
-build_images = mkdir -p $(objtree)/Documentation/DocBook/media/ && \
- cp $(srctree)/Documentation/DocBook/dvb/*.png $(srctree)/Documentation/DocBook/v4l/*.gif $(objtree)/Documentation/DocBook/media/
-
-xmldoclinks:
-ifneq ($(objtree),$(srctree))
- for dep in dvb media-entities.tmpl media-indices.tmpl v4l; do \
- rm -f $(objtree)/Documentation/DocBook/$$dep \
- && ln -s $(srctree)/Documentation/DocBook/$$dep $(objtree)/Documentation/DocBook/ \
- || exit; \
- done
-endif
+media:
+ mkdir -p $(srctree)/Documentation/DocBook/media/
+ cp $(srctree)/Documentation/DocBook/dvb/*.png $(srctree)/Documentation/DocBook/v4l/*.gif $(srctree)/Documentation/DocBook/media/
installmandocs: mandocs
mkdir -p /usr/local/man/man9/
identified through its new major/minor numbers encoded
in devnum.
-norecovery Don't load the journal on mounting. Note that
-noload if the filesystem was not unmounted cleanly,
+noload Don't load the journal on mounting. Note that
+ if the filesystem was not unmounted cleanly,
skipping the journal replay will lead to the
filesystem containing inconsistencies that can
lead to any number of problems.
also be used to enable or disable barriers, for
consistency with other ext4 mount options.
-inode_readahead_blks=n This tuning parameter controls the maximum
+inode_readahead=n This tuning parameter controls the maximum
number of inode table blocks that ext4's inode
table readahead algorithm will pre-read into
the buffer cache. The default value is 32 blocks.
system crashes before the delayed allocation
blocks are forced to disk.
-discard Controls whether ext4 should issue discard/TRIM
-nodiscard(*) commands to the underlying block device when
- blocks are freed. This is useful for SSD devices
- and sparse/thinly-provisioned LUNs, but it is off
- by default until sufficient testing has been done.
-
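
For illustration, a recovery mount that skips journal replay can be requested
from C through mount(2); this is a minimal sketch, with placeholder device and
mount-point paths:

	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		/* "noload" skips journal replay; MS_RDONLY limits the
		 * damage from the inconsistencies described above. */
		if (mount("/dev/sdb1", "/mnt/recover", "ext4",
			  MS_RDONLY, "noload") < 0) {
			perror("mount");
			return 1;
		}
		return 0;
	}
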
Data Mode
=========
There are 3 different data modes:
to a common usb-storage quirk flag as follows:
a = SANE_SENSE (collect more than 18 bytes
of sense data);
- b = BAD_SENSE (don't collect more than 18
- bytes of sense data);
c = FIX_CAPACITY (decrease the reported
device capacity by one sector);
h = CAPACITY_HEURISTICS (decrease the
} chip;
};
-4.27 KVM_GET_CLOCK
-
-Capability: KVM_CAP_ADJUST_CLOCK
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_clock_data (out)
-Returns: 0 on success, -1 on error
-
-Gets the current timestamp of kvmclock as seen by the current guest. In
-conjunction with KVM_SET_CLOCK, it is used to ensure monotonicity on scenarios
-such as migration.
-
-struct kvm_clock_data {
- __u64 clock; /* kvmclock current value */
- __u32 flags;
- __u32 pad[9];
-};
-
-4.28 KVM_SET_CLOCK
-
-Capability: KVM_CAP_ADJUST_CLOCK
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_clock_data (in)
-Returns: 0 on success, -1 on error
-
-Sets the current timestamp of kvmclock to the value specified in its parameter.
-In conjunction with KVM_GET_CLOCK, it is used to ensure monotonicity on scenarios
-such as migration.
-
-struct kvm_clock_data {
- __u64 clock; /* kvmclock current value */
- __u32 flags;
- __u32 pad[9];
-};
-
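
For reference, the ioctl pair documented in the hunk removed above is driven
from userspace along these lines; a minimal sketch, assuming src_vmfd and
dst_vmfd are VM file descriptors obtained via KVM_CREATE_VM:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Read the source VM's kvmclock and replay it on the target,
	 * preserving monotonicity across a migration. */
	static int migrate_kvmclock(int src_vmfd, int dst_vmfd)
	{
		struct kvm_clock_data data = { 0 };

		if (ioctl(src_vmfd, KVM_GET_CLOCK, &data) < 0)
			return -1;
		return ioctl(dst_vmfd, KVM_SET_CLOCK, &data);
	}
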
5. The kvm_run structure
Application code obtains a pointer to the kvm_run structure by
171 -> Beholder BeholdTV X7 [5ace:7595]
172 -> RoverMedia TV Link Pro FM [19d1:0138]
173 -> Zolid Hybrid TV Tuner PCI [1131:2004]
-174 -> Asus Europa Hybrid OEM [1043:4847]
ov519 041e:4060 Creative Live! VISTA VF0350
ov519 041e:4061 Creative Live! VISTA VF0400
ov519 041e:4064 Creative Live! VISTA VF0420
-ov519 041e:4067 Creative Live! Cam Video IM (VF0350)
ov519 041e:4068 Creative Live! VISTA VF0470
spca561 0458:7004 Genius VideoCAM Express V2
sunplus 0458:7006 Genius Dsc 1.3 Smart
F: drivers/net/wireless/rndis_wlan.c
USB XHCI DRIVER
-M: Sarah Sharp <sarah.a.sharp@linux.intel.com>
+M: Sarah Sharp <sarah.a.sharp@intel.com>
L: linux-usb@vger.kernel.org
S: Supported
-F: drivers/usb/host/xhci*
-F: drivers/usb/host/pci-quirks*
USB ZC0301 DRIVER
M: Luca Risolia <luca.risolia@studio.unibo.it>
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 32
-EXTRAVERSION = .9
+EXTRAVERSION =
NAME = Man-Eating Seals of Antiquity
# *DOCUMENTATION*
unsigned long, prot, unsigned long, flags, unsigned long, fd,
unsigned long, off)
{
- unsigned long ret = -EINVAL;
+ struct file *file = NULL;
+ unsigned long ret = -EBADF;
#if 0
if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED))
printk("%s: unimplemented OSF mmap flags %04lx\n",
current->comm, flags);
#endif
- if ((off + PAGE_ALIGN(len)) < off)
- goto out;
- if (off & ~PAGE_MASK)
- goto out;
- ret = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ down_write(&current->mm->mmap_sem);
+ ret = do_mmap(file, addr, len, prot, flags, off);
+ up_write(&current->mm->mmap_sem);
+ if (file)
+ fput(file);
out:
return ret;
}
#include <asm-generic/mman.h>
-
-#define arch_mmap_check(addr, len, flags) \
- (((flags) & MAP_FIXED && (addr) < FIRST_USER_ADDRESS) ? -EINVAL : 0)
/* 160 */ CALL(sys_sched_get_priority_min)
CALL(sys_sched_rr_get_interval)
CALL(sys_nanosleep)
- CALL(sys_mremap)
+ CALL(sys_arm_mremap)
CALL(sys_setresuid16)
/* 165 */ CALL(sys_getresuid16)
CALL(sys_ni_syscall) /* vm86 */
tst r5, #PGOFF_MASK
moveq r5, r5, lsr #PAGE_SHIFT - 12
streq r5, [sp, #4]
- beq sys_mmap_pgoff
+ beq do_mmap2
mov r0, #-EINVAL
mov pc, lr
#else
str r5, [sp, #4]
- b sys_mmap_pgoff
+ b do_mmap2
#endif
ENDPROC(sys_mmap2)
#include <linux/ipc.h>
#include <linux/uaccess.h>
+extern unsigned long do_mremap(unsigned long addr, unsigned long old_len,
+ unsigned long new_len, unsigned long flags,
+ unsigned long new_addr);
+
+/* common code for old and new mmaps */
+inline long do_mmap2(
+ unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ int error = -EINVAL;
+ struct file * file = NULL;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+ if (flags & MAP_FIXED && addr < FIRST_USER_ADDRESS)
+ goto out;
+
+ error = -EBADF;
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+out:
+ return error;
+}
+
struct mmap_arg_struct {
unsigned long addr;
unsigned long len;
if (a.offset & ~PAGE_MASK)
goto out;
- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
out:
return error;
}
+asmlinkage unsigned long
+sys_arm_mremap(unsigned long addr, unsigned long old_len,
+ unsigned long new_len, unsigned long flags,
+ unsigned long new_addr)
+{
+ unsigned long ret = -EINVAL;
+
+ if (flags & MREMAP_FIXED && new_addr < FIRST_USER_ADDRESS)
+ goto out;
+
+ down_write(¤t->mm->mmap_sem);
+ ret = do_mremap(addr, old_len, new_len, flags, new_addr);
+ up_write(¤t->mm->mmap_sem);
+
+out:
+ return ret;
+}
+
/*
* Perform the select(nd, in, out, ex, tv) and mmap() system
* calls.
.part_no = 0xb770,
.manufacturer = 0x017,
.cpu_id = DAVINCI_CPU_ID_DM6467,
- .name = "dm6467_rev1.x",
- },
- {
- .variant = 0x1,
- .part_no = 0xb770,
- .manufacturer = 0x017,
- .cpu_id = DAVINCI_CPU_ID_DM6467,
- .name = "dm6467_rev3.x",
+ .name = "dm6467",
},
};
goto err_free_vbus_gpio;
/* USB Hub power-on and reset */
- gpio_direction_output(usb_hub_reset, 1);
- gpio_direction_output(GPIO9_USB_VBUS_EN, 0);
+ gpio_direction_output(usb_hub_reset, 0);
regulator_enable(em_x270_usb_ldo);
- gpio_set_value(usb_hub_reset, 0);
gpio_set_value(usb_hub_reset, 1);
+ gpio_set_value(usb_hub_reset, 0);
regulator_disable(em_x270_usb_ldo);
regulator_enable(em_x270_usb_ldo);
- gpio_set_value(usb_hub_reset, 0);
- gpio_set_value(GPIO9_USB_VBUS_EN, 1);
+ gpio_set_value(usb_hub_reset, 1);
+
+ /* enable VBUS */
+ gpio_direction_output(GPIO9_USB_VBUS_EN, 1);
return 0;
* We enforce the MAP_FIXED case.
*/
if (flags & MAP_FIXED) {
- if (aliasing && flags & MAP_SHARED &&
- (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
+ if (aliasing && flags & MAP_SHARED && addr & (SHMLBA - 1))
return -EINVAL;
return addr;
}
struct pt_regs *);
asmlinkage int sys_rt_sigreturn(struct pt_regs *);
+/* kernel/sys_avr32.c */
+asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
+ unsigned long, unsigned long, off_t);
+
/* mm/cache.c */
asmlinkage int sys_cacheflush(int, void __user *, size_t);
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mm.h>
#include <linux/unistd.h>
+#include <asm/mman.h>
+#include <asm/uaccess.h>
+#include <asm/syscalls.h>
+
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, off_t offset)
+{
+ int error = -EBADF;
+ struct file *file = NULL;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ return error;
+ }
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, offset);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+ return error;
+}
+
int kernel_execve(const char *file, char **argv, char **envp)
{
register long scno asm("r8") = __NR_execve;
__sys_mmap2:
pushm lr
st.w --sp, ARG6
- call sys_mmap_pgoff
+ call sys_mmap2
sub sp, -4
popm pc
#include <asm-generic/page.h>
#define MAP_NR(addr) (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT)
-#define VM_DATA_DEFAULT_FLAGS \
- (VM_READ | VM_WRITE | \
- ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
#endif
#include <asm/cacheflush.h>
#include <asm/dma.h>
+/* common code for old and new mmaps */
+static inline long
+do_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ int error = -EBADF;
+ struct file *file = NULL;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+ out:
+ return error;
+}
+
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ return do_mmap2(addr, len, prot, flags, fd, pgoff);
+}
+
asmlinkage void *sys_sram_alloc(size_t size, unsigned long flags)
{
return sram_alloc_with_lsl(size, flags);
.long _sys_ni_syscall /* streams2 */
.long _sys_vfork /* 190 */
.long _sys_getrlimit
- .long _sys_mmap_pgoff
+ .long _sys_mmap2
.long _sys_truncate64
.long _sys_ftruncate64
.long _sys_stat64 /* 195 */
#include <asm/uaccess.h>
#include <asm/segment.h>
+/* common code for old and new mmaps */
+static inline long
+do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
+ unsigned long flags, unsigned long fd, unsigned long pgoff)
+{
+ int error = -EBADF;
+ struct file * file = NULL;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+out:
+ return error;
+}
+
asmlinkage unsigned long old_mmap(unsigned long __user *args)
{
unsigned long buffer[6];
if (buffer[5] & ~PAGE_MASK) /* verify that offset is on page boundary */
goto out;
- err = sys_mmap_pgoff(buffer[0], buffer[1], buffer[2], buffer[3],
+ err = do_mmap2(buffer[0], buffer[1], buffer[2], buffer[3],
buffer[4], buffer[5] >> PAGE_SHIFT);
out:
return err;
sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
unsigned long flags, unsigned long fd, unsigned long pgoff)
{
- /* bug(?): 8Kb pages here */
- return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
+ return do_mmap2(addr, len, prot, flags, fd, pgoff);
}
/*
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#ifdef CONFIG_MMU
#define VM_DATA_DEFAULT_FLAGS \
(VM_READ | VM_WRITE | \
((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#endif
#endif /* __ASSEMBLY__ */
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
{
+ int error = -EBADF;
+ struct file * file = NULL;
+
/* As with sparc32, make sure the shift for mmap2 is constant
(12), no matter what PAGE_SIZE we have.... */
trying to map something we can't */
if (pgoff & ((1 << (PAGE_SHIFT - 12)) - 1))
return -EINVAL;
+ pgoff >>= PAGE_SHIFT - 12;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+out:
+ return error;
+}
+
+#if 0 /* DAVIDM - do we want this */
+struct mmap_arg_struct64 {
+ __u32 addr;
+ __u32 len;
+ __u32 prot;
+ __u32 flags;
+ __u64 offset; /* 64 bits */
+ __u32 fd;
+};
+
+asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
+{
+ int error = -EFAULT;
+ struct file * file = NULL;
+ struct mmap_arg_struct64 a;
+ unsigned long pgoff;
+
+ if (copy_from_user(&a, arg, sizeof(a)))
+ return -EFAULT;
+
+ if ((long)a.offset & ~PAGE_MASK)
+ return -EINVAL;
+
+ pgoff = a.offset >> PAGE_SHIFT;
+ if ((a.offset >> PAGE_SHIFT) != pgoff)
+ return -EINVAL;
+
+ if (!(a.flags & MAP_ANONYMOUS)) {
+ error = -EBADF;
+ file = fget(a.fd);
+ if (!file)
+ goto out;
+ }
+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- return sys_mmap_pgoff(addr, len, prot, flags, fd,
- pgoff >> (PAGE_SHIFT - 12));
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+ if (file)
+ fput(file);
+out:
+ return error;
}
+#endif
/*
* sys_ipc() is the de-multiplexer for the SysV IPC calls..
#include <asm/traps.h>
#include <asm/unistd.h>
+/* common code for old and new mmaps */
+static inline long do_mmap2(
+ unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ int error = -EBADF;
+ struct file * file = NULL;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+out:
+ return error;
+}
+
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ return do_mmap2(addr, len, prot, flags, fd, pgoff);
+}
+
/*
* Perform the select(nd, in, out, ex, tv) and mmap() system
* calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
if (a.offset & ~PAGE_MASK)
goto out;
- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
- a.offset >> PAGE_SHIFT);
+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
+out:
+ return error;
+}
+
+#if 0 /* DAVIDM - do we want this */
+struct mmap_arg_struct64 {
+ __u32 addr;
+ __u32 len;
+ __u32 prot;
+ __u32 flags;
+ __u64 offset; /* 64 bits */
+ __u32 fd;
+};
+
+asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
+{
+ int error = -EFAULT;
+ struct file * file = NULL;
+ struct mmap_arg_struct64 a;
+ unsigned long pgoff;
+
+ if (copy_from_user(&a, arg, sizeof(a)))
+ return -EFAULT;
+
+ if ((long)a.offset & ~PAGE_MASK)
+ return -EINVAL;
+
+ pgoff = a.offset >> PAGE_SHIFT;
+ if ((a.offset >> PAGE_SHIFT) != pgoff)
+ return -EINVAL;
+
+ if (!(a.flags & MAP_ANONYMOUS)) {
+ error = -EBADF;
+ file = fget(a.fd);
+ if (!file)
+ goto out;
+ }
+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+ if (file)
+ fput(file);
out:
return error;
}
+#endif
struct sel_arg_struct {
unsigned long n;
.long SYMBOL_NAME(sys_ni_syscall) /* streams2 */
.long SYMBOL_NAME(sys_vfork) /* 190 */
.long SYMBOL_NAME(sys_getrlimit)
- .long SYMBOL_NAME(sys_mmap_pgoff)
+ .long SYMBOL_NAME(sys_mmap2)
.long SYMBOL_NAME(sys_truncate64)
.long SYMBOL_NAME(sys_ftruncate64)
.long SYMBOL_NAME(sys_stat64) /* 195 */
prot = get_prot32(prot);
- if (flags & MAP_HUGETLB)
- return -ENOMEM;
-
#if PAGE_SHIFT > IA32_PAGE_SHIFT
mutex_lock(&ia32_mmap_mutex);
{
extern void __iomem * ioremap(unsigned long offset, unsigned long size);
extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
extern void iounmap (volatile void __iomem *addr);
-extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size);
-extern void early_iounmap (volatile void __iomem *addr, unsigned long size);
/*
* String version of IO memory access ops:
asmlinkage unsigned long
ia64_brk (unsigned long brk)
{
- unsigned long retval = sys_brk(brk);
+ unsigned long rlim, retval, newbrk, oldbrk;
+ struct mm_struct *mm = current->mm;
+
+ /*
+ * Most of this replicates the code in sys_brk() except for an additional safety
+ * check and the clearing of r8. However, we can't call sys_brk() because we need
+ * to acquire the mmap_sem before we can do the test...
+ */
+ down_write(&mm->mmap_sem);
+
+ if (brk < mm->end_code)
+ goto out;
+ newbrk = PAGE_ALIGN(brk);
+ oldbrk = PAGE_ALIGN(mm->brk);
+ if (oldbrk == newbrk)
+ goto set_brk;
+
+ /* Always allow shrinking brk. */
+ if (brk <= mm->brk) {
+ if (!do_munmap(mm, newbrk, oldbrk-newbrk))
+ goto set_brk;
+ goto out;
+ }
+
+ /* Check against unimplemented/unmapped addresses: */
+ if ((newbrk - oldbrk) > RGN_MAP_LIMIT || REGION_OFFSET(newbrk) > RGN_MAP_LIMIT)
+ goto out;
+
+ /* Check against rlimit.. */
+ rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
+ if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
+ goto out;
+
+ /* Check against existing mmap mappings. */
+ if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
+ goto out;
+
+ /* Ok, looks good - let it rip. */
+ if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
+ goto out;
+set_brk:
+ mm->brk = brk;
+out:
+ retval = mm->brk;
+ up_write(&mm->mmap_sem);
force_successful_syscall_return();
return retval;
}
return 0;
}
+static inline unsigned long
+do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, unsigned long pgoff)
+{
+ struct file *file = NULL;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ return -EBADF;
+
+ if (!file->f_op || !file->f_op->mmap) {
+ addr = -ENODEV;
+ goto out;
+ }
+ }
+
+ /* Careful about overflows.. */
+ len = PAGE_ALIGN(len);
+ if (!len || len > TASK_SIZE) {
+ addr = -EINVAL;
+ goto out;
+ }
+
+ down_write(&current->mm->mmap_sem);
+ addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+out: if (file)
+ fput(file);
+ return addr;
+}
+
/*
* mmap2() is like mmap() except that the offset is expressed in units
* of PAGE_SIZE (instead of bytes). This allows to mmap2() (pieces
asmlinkage unsigned long
sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff)
{
- addr = sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
+ addr = do_mmap2(addr, len, prot, flags, fd, pgoff);
if (!IS_ERR((void *) addr))
force_successful_syscall_return();
return addr;
if (offset_in_page(off) != 0)
return -EINVAL;
- addr = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
+ addr = do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
if (!IS_ERR((void *) addr))
force_successful_syscall_return();
return addr;
return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr);
}
-void __iomem *
-early_ioremap (unsigned long phys_addr, unsigned long size)
-{
- return __ioremap(phys_addr);
-}
-
void __iomem *
ioremap (unsigned long phys_addr, unsigned long size)
{
}
EXPORT_SYMBOL(ioremap_nocache);
-void
-early_iounmap (volatile void __iomem *addr, unsigned long size)
-{
-}
-
void
iounmap (volatile void __iomem *addr)
{
return oldval;
}
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ int error = -EBADF;
+ struct file *file = NULL;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+out:
+ return error;
+}
+
/*
* sys_ipc() is the de-multiplexer for the SysV IPC calls..
*
.long sys_ni_syscall /* streams2 */
.long sys_vfork /* 190 */
.long sys_getrlimit
- .long sys_mmap_pgoff
+ .long sys_mmap2
.long sys_truncate64
.long sys_ftruncate64
.long sys_stat64 /* 195 */
#include <asm/page.h>
#include <asm/unistd.h>
+/* common code for old and new mmaps */
+static inline long do_mmap2(
+ unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ int error = -EBADF;
+ struct file * file = NULL;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+out:
+ return error;
+}
+
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
{
- /*
- * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
- * so we need to shift the argument down by 1; m68k mmap64(3)
- * (in libc) expects the last argument of mmap2 in 4Kb units.
- */
- return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
+ return do_mmap2(addr, len, prot, flags, fd, pgoff);
}
/*
if (a.offset & ~PAGE_MASK)
goto out;
- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
- a.offset >> PAGE_SHIFT);
+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
+out:
+ return error;
+}
+
+#if 0
+struct mmap_arg_struct64 {
+ __u32 addr;
+ __u32 len;
+ __u32 prot;
+ __u32 flags;
+ __u64 offset; /* 64 bits */
+ __u32 fd;
+};
+
+asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
+{
+ int error = -EFAULT;
+ struct file * file = NULL;
+ struct mmap_arg_struct64 a;
+ unsigned long pgoff;
+
+ if (copy_from_user(&a, arg, sizeof(a)))
+ return -EFAULT;
+
+ if ((long)a.offset & ~PAGE_MASK)
+ return -EINVAL;
+
+ pgoff = a.offset >> PAGE_SHIFT;
+ if ((a.offset >> PAGE_SHIFT) != pgoff)
+ return -EINVAL;
+
+ if (!(a.flags & MAP_ANONYMOUS)) {
+ error = -EBADF;
+ file = fget(a.fd);
+ if (!file)
+ goto out;
+ }
+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+ if (file)
+ fput(file);
out:
return error;
}
+#endif
struct sel_arg_struct {
unsigned long n;
#include <asm/cacheflush.h>
#include <asm/unistd.h>
+/* common code for old and new mmaps */
+static inline long do_mmap2(
+ unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ int error = -EBADF;
+ struct file * file = NULL;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+out:
+ return error;
+}
+
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ return do_mmap2(addr, len, prot, flags, fd, pgoff);
+}
+
/*
* Perform the select(nd, in, out, ex, tv) and mmap() system
* calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
if (a.offset & ~PAGE_MASK)
goto out;
- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
- a.offset >> PAGE_SHIFT);
+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
out:
return error;
}
.long sys_ni_syscall /* streams2 */
.long sys_vfork /* 190 */
.long sys_getrlimit
- .long sys_mmap_pgoff
+ .long sys_mmap2
.long sys_truncate64
.long sys_ftruncate64
.long sys_stat64 /* 195 */
return error;
}
+asmlinkage long
+sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ struct file *file = NULL;
+ int ret = -EBADF;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file) {
+ printk(KERN_INFO "no fd in mmap\r\n");
+ goto out;
+ }
+ }
+
+ down_write(&current->mm->mmap_sem);
+ ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+ if (file)
+ fput(file);
+out:
+ return ret;
+}
+
asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, off_t pgoff)
{
- if (pgoff & ~PAGE_MASK)
- return -EINVAL;
+ int err = -EINVAL;
+
+ if (pgoff & ~PAGE_MASK) {
+ printk(KERN_INFO "no pagemask in mmap\r\n");
+ goto out;
+ }
- return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT);
+ err = sys_mmap2(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT);
+out:
+ return err;
}
/*
.long sys_ni_syscall /* reserved for streams2 */
.long sys_vfork /* 190 */
.long sys_getrlimit
- .long sys_mmap_pgoff /* mmap2 */
+ .long sys_mmap2 /* mmap2 */
.long sys_truncate64
.long sys_ftruncate64
.long sys_stat64 /* 195 */
unsigned long, prot, unsigned long, flags, unsigned long, fd,
unsigned long, pgoff)
{
+ struct file * file = NULL;
unsigned long error;
error = -EINVAL;
if (pgoff & (~PAGE_MASK >> 12))
goto out;
- error = sys_mmap_pgoff(addr, len, prot, flags, fd,
- pgoff >> (PAGE_SHIFT-12));
+ pgoff >>= PAGE_SHIFT-12;
+
+ if (!(flags & MAP_ANONYMOUS)) {
+ error = -EBADF;
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+ if (file)
+ fput(file);
+
out:
return error;
}
* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
- if ((flags & MAP_SHARED) &&
- ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
+ if ((flags & MAP_SHARED) && (addr & shm_align_mask))
return -EINVAL;
return addr;
}
}
}
+/* common code for old and new mmaps */
+static inline unsigned long
+do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
+ unsigned long flags, unsigned long fd, unsigned long pgoff)
+{
+ unsigned long error = -EBADF;
+ struct file * file = NULL;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+out:
+ return error;
+}
+
SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags, unsigned long,
fd, off_t, offset)
if (offset & ~PAGE_MASK)
goto out;
- result = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+ result = do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
out:
return result;
if (pgoff & (~PAGE_MASK >> 12))
return -EINVAL;
- return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12));
+ return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12));
}
save_static_function(sys_fork);
#include <asm-generic/mman.h>
-
-#define MIN_MAP_ADDR PAGE_SIZE /* minimum fixed mmap address */
-
-#define arch_mmap_check(addr, len, flags) \
- (((flags) & MAP_FIXED && (addr) < MIN_MAP_ADDR) ? -EINVAL : 0)
.long sys_ni_syscall /* reserved for streams2 */
.long sys_vfork /* 190 */
.long sys_getrlimit
- .long sys_mmap_pgoff
+ .long sys_mmap2
.long sys_truncate64
.long sys_ftruncate64
.long sys_stat64 /* 195 */
#include <asm/uaccess.h>
+#define MIN_MAP_ADDR PAGE_SIZE /* minimum fixed mmap address */
+
+/*
+ * memory mapping syscall
+ */
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ struct file *file = NULL;
+ long error = -EINVAL;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+ if (flags & MAP_FIXED && addr < MIN_MAP_ADDR)
+ goto out;
+
+ error = -EBADF;
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+out:
+ return error;
+}
+
asmlinkage long old_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long offset)
{
if (offset & ~PAGE_MASK)
return -EINVAL;
- return sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+ return sys_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
}
struct sel_arg_struct {
return addr;
}
+static unsigned long do_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags, unsigned long fd,
+ unsigned long pgoff)
+{
+ struct file * file = NULL;
+ unsigned long error = -EBADF;
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+ if (file != NULL)
+ fput(file);
+out:
+ return error;
+}
+
asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long fd,
unsigned long pgoff)
{
/* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
we have. */
- return sys_mmap_pgoff(addr, len, prot, flags, fd,
- pgoff >> (PAGE_SHIFT - 12));
+ return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12));
}
asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
unsigned long offset)
{
if (!(offset & ~PAGE_MASK)) {
- return sys_mmap_pgoff(addr, len, prot, flags, fd,
- offset >> PAGE_SHIFT);
+ return do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
} else {
return -EINVAL;
}
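
The constant shift of 12 above means callers of mmap2 always express the file
offset in 4096-byte units, whatever the kernel's PAGE_SIZE. A hypothetical
userspace caller, on a 32-bit architecture that exposes __NR_mmap2, would
convert a 4096-aligned byte offset like so:

	#include <sys/mman.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Map 8 KiB of fd at byte offset off (a multiple of 4096);
	 * mmap2 takes the offset in fixed 4096-byte units. */
	static void *map_at(int fd, unsigned long off)
	{
		return (void *) syscall(SYS_mmap2, NULL, (size_t) 8192,
					PROT_READ, MAP_PRIVATE, fd,
					off >> 12);
	}
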
#ifdef __powerpc64__
# define SET_PERSONALITY(ex) \
do { \
+ unsigned long new_flags = 0; \
if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
- set_thread_flag(TIF_32BIT); \
+ new_flags = _TIF_32BIT; \
+ if ((current_thread_info()->flags & _TIF_32BIT) \
+ != new_flags) \
+ set_thread_flag(TIF_ABI_PENDING); \
else \
- clear_thread_flag(TIF_32BIT); \
+ clear_thread_flag(TIF_ABI_PENDING); \
if (personality(current->personality) != PER_LINUX32) \
set_personality(PER_LINUX | \
(current->personality & (~PER_MASK))); \
void sort_ex_table(struct exception_table_entry *start,
struct exception_table_entry *finish);
-#ifdef CONFIG_MODVERSIONS
-#define ARCH_RELOCATES_KCRCTAB
-
-extern const unsigned long reloc_start[];
-#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MODULE_H */
#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
#define TIF_FREEZE 14 /* Freezing for suspend */
#define TIF_RUNLATCH 15 /* Is the runlatch enabled? */
+#define TIF_ABI_PENDING 16 /* 32/64 bit switch needed */
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_FREEZE (1<<TIF_FREEZE)
#define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
+#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP)
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
*/
static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
unsigned int areg, struct pt_regs *regs,
- unsigned int flags, unsigned int length,
- unsigned int elsize)
+ unsigned int flags, unsigned int length)
{
char *ptr;
- unsigned long *lptr;
int ret = 0;
- int sw = 0;
- int i, j;
flush_vsx_to_thread(current);
else
ptr = (char *) &current->thread.vr[reg - 32];
- lptr = (unsigned long *) ptr;
-
- if (flags & SW)
- sw = elsize-1;
-
- for (j = 0; j < length; j += elsize) {
- for (i = 0; i < elsize; ++i) {
- if (flags & ST)
- ret |= __put_user(ptr[i^sw], addr + i);
- else
- ret |= __get_user(ptr[i^sw], addr + i);
+ if (flags & ST)
+ ret = __copy_to_user(addr, ptr, length);
+ else {
+ if (flags & SPLT){
+ ret = __copy_from_user(ptr, addr, length);
+ ptr += length;
}
- ptr += elsize;
- addr += elsize;
+ ret |= __copy_from_user(ptr, addr, length);
}
-
- if (!ret) {
- if (flags & U)
- regs->gpr[areg] = regs->dar;
-
- /* Splat load copies the same data to top and bottom 8 bytes */
- if (flags & SPLT)
- lptr[1] = lptr[0];
- /* For 8 byte loads, zero the top 8 bytes */
- else if (!(flags & ST) && (8 == length))
- lptr[1] = 0;
- } else
+ if (flags & U)
+ regs->gpr[areg] = regs->dar;
+ if (ret)
return -EFAULT;
-
return 1;
}
#endif
#ifdef CONFIG_VSX
if ((instruction & 0xfc00003e) == 0x7c000018) {
- unsigned int elsize;
-
- /* Additional register addressing bit (64 VSX vs 32 FPR/GPR) */
+ /* Additional register addressing bit (64 VSX vs 32 FPR/GPR */
reg |= (instruction & 0x1) << 5;
/* Simple inline decoder instead of a table */
- /* VSX has only 8 and 16 byte memory accesses */
- nb = 8;
if (instruction & 0x200)
nb = 16;
-
- /* Vector stores in little-endian mode swap individual
- elements, so process them separately */
- elsize = 4;
- if (instruction & 0x80)
- elsize = 8;
-
+ else if (instruction & 0x080)
+ nb = 8;
+ else
+ nb = 4;
flags = 0;
- if (regs->msr & MSR_LE)
- flags |= SW;
if (instruction & 0x100)
flags |= ST;
if (instruction & 0x040)
nb = 8;
}
PPC_WARN_EMULATED(vsx);
- return emulate_vsx(addr, reg, areg, regs, flags, nb, elsize);
+ return emulate_vsx(addr, reg, areg, regs, flags, nb);
}
#endif
/* A size of 0 indicates an instruction we don't support, with
list_for_each_entry(dev, &bus->devices, bus_list) {
struct dev_archdata *sd = &dev->dev.archdata;
- /* Cardbus can call us to add new devices to a bus, so ignore
- * those who are already fully discovered
- */
- if (dev->is_added)
- continue;
-
/* Setup OF node pointer in archdata */
sd->of_node = pci_device_to_OF_node(dev);
}
EXPORT_SYMBOL(pcibios_fixup_bus);
-void __devinit pci_fixup_cardbus(struct pci_bus *bus)
-{
- /* Now fixup devices on that bus */
- pcibios_setup_bus_devices(bus);
-}
-
-
static int skip_isa_ioresource_align(struct pci_dev *dev)
{
if ((ppc_pci_flags & PPC_PCI_CAN_SKIP_ISA_ALIGN) &&
void flush_thread(void)
{
+#ifdef CONFIG_PPC64
+ struct thread_info *t = current_thread_info();
+
+ if (test_ti_thread_flag(t, TIF_ABI_PENDING)) {
+ clear_ti_thread_flag(t, TIF_ABI_PENDING);
+ if (test_ti_thread_flag(t, TIF_32BIT))
+ clear_ti_thread_flag(t, TIF_32BIT);
+ else
+ set_ti_thread_flag(t, TIF_32BIT);
+ }
+#endif
+
discard_lazy_cpu_state();
if (current->thread.dabr) {
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long off, int shift)
{
+ struct file * file = NULL;
unsigned long ret = -EINVAL;
if (!arch_validate_prot(prot))
goto out;
off >>= shift;
}
+
+ ret = -EBADF;
+ if (!(flags & MAP_ANONYMOUS)) {
+ if (!(file = fget(fd)))
+ goto out;
+ }
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- ret = sys_mmap_pgoff(addr, len, prot, flags, fd, off);
+ down_write(&current->mm->mmap_sem);
+ ret = do_mmap_pgoff(file, addr, len, prot, flags, off);
+ up_write(&current->mm->mmap_sem);
+ if (file)
+ fput(file);
out:
return ret;
}
* all 1's
*/
mfspr r4,SPRN_VRSAVE
- cmpwi 0,r4,0
+ cmpdi 0,r4,0
bne+ 1f
li r4,-1
mtspr SPRN_VRSAVE,r4
#endif
SECTIONS
{
- . = 0;
- reloc_start = .;
-
. = KERNELBASE;
/*
DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8641, quirk_fsl_pcie_header);
DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8641D, quirk_fsl_pcie_header);
DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8610, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1011E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1011, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010, quirk_fsl_pcie_header);
DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020E, quirk_fsl_pcie_header);
DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080, quirk_fsl_pcie_header);
#endif /* CONFIG_PPC_85xx || CONFIG_PPC_86xx */
#if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x)
#ifndef __LINUX_KVM_S390_H
#define __LINUX_KVM_S390_H
+
/*
* asm-s390/kvm.h - KVM s390 specific structures and definitions
*
*/
#include <linux/types.h>
-#define __KVM_S390
-
/* for KVM_GET_REGS and KVM_SET_REGS */
struct kvm_regs {
/* general purpose regs for s390 */
u32 offset;
};
+/* common code for old and new mmaps */
+static inline long do_mmap2(
+ unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ struct file * file = NULL;
+ unsigned long error = -EBADF;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ if (!IS_ERR((void *) error) && error + len >= 0x80000000ULL) {
+ /* Result is out of bounds. */
+ do_munmap(current->mm, addr, len);
+ error = -ENOMEM;
+ }
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+out:
+ return error;
+}
+
+
asmlinkage unsigned long
old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
{
if (a.offset & ~PAGE_MASK)
goto out;
- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
- a.offset >> PAGE_SHIFT);
+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
out:
return error;
}
if (copy_from_user(&a, arg, sizeof(a)))
goto out;
- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
out:
return error;
}
mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
TRACE_IRQS_ON
- lm %r2,%r6,SP_R2(%r15) # load svc arguments
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
b BASED(sysc_do_svc)
mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
TRACE_IRQS_ON
- lmg %r2,%r6,SP_R2(%r15) # load svc arguments
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
j sysc_do_svc
slr %r0,%r0 # set cpuid to zero
sigp %r1,%r0,0x12 # switch to esame mode
sam64 # switch to 64 bit mode
- llgfr %r13,%r13 # clear high-order half of base reg
- lmh %r0,%r15,.Lzero64-.LPG1(%r13) # clear high-order half
lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
# move IPL device to lowcore
.L4malign:.quad 0xffffffffffc00000
.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
.Lnop: .long 0x07000700
-.Lzero64:.fill 16,4,0x0
#ifdef CONFIG_ZFCPDUMP
.Lcurrent_cpu:
.long 0x0
#include <asm/uaccess.h>
#include "entry.h"
+/* common code for old and new mmaps */
+static inline long do_mmap2(
+ unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ long error = -EBADF;
+ struct file * file = NULL;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+out:
+ return error;
+}
+
/*
* Perform the select(nd, in, out, ex, tv) and mmap() system
* calls. Linux for S/390 isn't able to handle more than 5
if (copy_from_user(&a, arg, sizeof(a)))
goto out;
- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
out:
return error;
}
if (a.offset & ~PAGE_MASK)
goto out;
- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
out:
return error;
}
return rc2;
}
-static const intercept_handler_t intercept_funcs[] = {
+static const intercept_handler_t intercept_funcs[0x48 >> 2] = {
[0x00 >> 2] = handle_noop,
[0x04 >> 2] = handle_instruction,
[0x08 >> 2] = handle_prog,
intercept_handler_t func;
u8 code = vcpu->arch.sie_block->icptcode;
- if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs))
+ if (code & 3 || code > 0x48)
return -ENOTSUPP;
func = intercept_funcs[code >> 2];
if (func)
int kvm_dev_ioctl_check_extension(long ext)
{
- int r;
-
switch (ext) {
- case KVM_CAP_S390_PSW:
- r = 1;
- break;
default:
- r = 0;
+ return 0;
}
- return r;
}
/* Section: vm related */
vcpu_load(vcpu);
if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
rc = -EBUSY;
- else {
- vcpu->run->psw_mask = psw.mask;
- vcpu->run->psw_addr = psw.addr;
- }
+ else
+ vcpu->arch.sie_block->gpsw = psw;
vcpu_put(vcpu);
return rc;
}
switch (kvm_run->exit_reason) {
case KVM_EXIT_S390_SIEIC:
+ vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
+ vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
+ break;
case KVM_EXIT_UNKNOWN:
case KVM_EXIT_INTR:
case KVM_EXIT_S390_RESET:
BUG();
}
- vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
- vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
-
might_fault();
do {
/* intercept cannot be handled in-kernel, prepare kvm-run */
kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
+ kvm_run->s390_sieic.mask = vcpu->arch.sie_block->gpsw.mask;
+ kvm_run->s390_sieic.addr = vcpu->arch.sie_block->gpsw.addr;
kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
rc = 0;
rc = 0;
}
- kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
- kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
-
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &sigsaved, NULL);
/* make sure that the new value is valid memory */
address = address & 0x7fffe000u;
- if ((copy_from_user(&tmp, (void __user *)
- (address + vcpu->arch.sie_block->gmsor) , 1)) ||
- (copy_from_user(&tmp, (void __user *)(address +
+ if ((copy_from_guest(vcpu, &tmp,
+ (u64) (address + vcpu->arch.sie_block->gmsor) , 1)) ||
+ (copy_from_guest(vcpu, &tmp, (u64) (address +
vcpu->arch.sie_block->gmsor + PAGE_SIZE), 1))) {
*reg |= SIGP_STAT_INVALID_PARAMETER;
return 1; /* invalid parameter */
sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
unsigned long flags, unsigned long fd, unsigned long pgoff)
{
- return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
+ int error = -EBADF;
+ struct file *file = NULL;
+
+ if (pgoff & (~PAGE_MASK >> 12))
+ return -EINVAL;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ return error;
+ }
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+
+ return error;
}
asmlinkage long
sys_mmap(unsigned long addr, unsigned long len, unsigned long prot,
- unsigned long flags, unsigned long fd, off_t offset)
+ unsigned long flags, unsigned long fd, off_t pgoff)
{
- if (unlikely(offset & ~PAGE_MASK))
- return -EINVAL;
- return sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+ return sys_mmap2(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT);
}
asmlinkage long
#define pte_special(pte) ((pte).pte_low & _PAGE_SPECIAL)
#ifdef CONFIG_X2TLB
-#define pte_write(pte) \
- ((pte).pte_high & (_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE))
+#define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE)
#else
#define pte_write(pte) ((pte).pte_low & _PAGE_RW)
#endif
* individually toggled (and user permissions are entirely decoupled from
* kernel permissions), we attempt to couple them a bit more sanely here.
*/
-PTE_BIT_FUNC(high, wrprotect, &= ~(_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE));
+PTE_BIT_FUNC(high, wrprotect, &= ~_PAGE_EXT_USER_WRITE);
PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE);
PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE);
#else
void flush_thread(void)
{
- /* Called by fs/exec.c (setup_new_exec) to remove traces of a
+ /* Called by fs/exec.c (flush_old_exec) to remove traces of a
* previously running executable. */
#ifdef CONFIG_SH_FPU
if (last_task_used_math == current) {
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
+static inline long
+do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
+ unsigned long flags, int fd, unsigned long pgoff)
+{
+ int error = -EBADF;
+ struct file *file = NULL;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+out:
+ return error;
+}
+
asmlinkage int old_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
int fd, unsigned long off)
{
if (off & ~PAGE_MASK)
return -EINVAL;
- return sys_mmap_pgoff(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
+ return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
}
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
pgoff >>= PAGE_SHIFT - 12;
- return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
+ return do_mmap2(addr, len, prot, flags, fd, pgoff);
}
/*
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
- if ((flags & MAP_SHARED) &&
- ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
+ if ((flags & MAP_SHARED) && (addr & shm_align_mask))
return -EINVAL;
return addr;
}
LDFLAGS := -m elf32_sparc
CHECKFLAGS += -D__sparc__
export BITS := 32
-UTS_MACHINE := sparc
#KBUILD_CFLAGS += -g -pipe -fcall-used-g5 -fcall-used-g7
KBUILD_CFLAGS += -m32 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7
LDFLAGS := -m elf64_sparc
export BITS := 64
-UTS_MACHINE := sparc64
KBUILD_CFLAGS += -m64 -pipe -mno-fpu -mcpu=ultrasparc -mcmodel=medlow \
-ffixed-g4 -ffixed-g5 -fcall-used-g7 -Wno-sign-compare \
#define ELF_PLATFORM (NULL)
#define SET_PERSONALITY(ex) \
-do { if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
- set_thread_flag(TIF_32BIT); \
+do { unsigned long new_flags = current_thread_info()->flags; \
+ new_flags &= _TIF_32BIT; \
+ if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
+ new_flags |= _TIF_32BIT; \
else \
- clear_thread_flag(TIF_32BIT); \
+ new_flags &= ~_TIF_32BIT; \
+ if ((current_thread_info()->flags & _TIF_32BIT) \
+ != new_flags) \
+ set_thread_flag(TIF_ABI_PENDING); \
+ else \
+ clear_thread_flag(TIF_ABI_PENDING); \
/* flush_thread will update pgd cache */ \
if (personality(current->personality) != PER_LINUX32) \
set_personality(PER_LINUX | \
/* flag bit 8 is available */
#define TIF_SECCOMP 9 /* secure computing */
#define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
+/* flag bit 11 is available */
/* NOTE: Thread flags >= 12 should be ones we have no interest
* in using in assembly, else we can't use the mask as
* an immediate value in instructions such as andcc.
*/
-/* flag bit 12 is available */
+#define TIF_ABI_PENDING 12
#define TIF_MEMDIE 13
#define TIF_POLLING_NRFLAG 14
#define TIF_FREEZE 15 /* is freezing for suspend */
#define _TIF_32BIT (1<<TIF_32BIT)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_FREEZE (1<<TIF_FREEZE)
snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
err = request_irq(lp->cfg.rx_irq, ldc_rx,
- IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
+ IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED,
lp->rx_irq_name, lp);
if (err)
return err;
err = request_irq(lp->cfg.tx_irq, ldc_tx,
- IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
+ IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED,
lp->tx_irq_name, lp);
if (err) {
free_irq(lp->cfg.rx_irq, lp);
int cpu = smp_processor_id();
clear_softint(1 << irq);
+ pcr_ops->write(PCR_PIC_PRIV);
local_cpu_data().__nmi_count++;
if (notify_die(DIE_NMI, "nmi", regs, 0,
pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
touched = 1;
- else
- pcr_ops->write(PCR_PIC_PRIV);
sum = kstat_irqs_cpu(0, cpu);
if (__get_cpu_var(nmi_touch)) {
int i;
/* Check address type match */
- if (!((addr[0] ^ range[0]) & 0x03000000))
- goto type_match;
-
- /* Special exception, we can map a 64-bit address into
- * a 32-bit range.
- */
- if ((addr[0] & 0x03000000) == 0x03000000 &&
- (range[0] & 0x03000000) == 0x02000000)
- goto type_match;
-
- return -EINVAL;
+ if ((addr[0] ^ range[0]) & 0x03000000)
+ return -EINVAL;
-type_match:
if (of_out_of_range(addr + 1, range + 1, range + na + pna,
na - 1, ns))
return -EINVAL;
data.addr = 0;
cpuc = &__get_cpu_var(cpu_hw_events);
-
- /* If the PMU has the TOE IRQ enable bits, we need to do a
- * dummy write to the %pcr to clear the overflow bits and thus
- * the interrupt.
- *
- * Do this before we peek at the counters to determine
- * overflow so we don't lose any events.
- */
- if (sparc_pmu->irq_bit)
- pcr_ops->write(cpuc->pcr);
-
for (idx = 0; idx < MAX_HWEVENTS; idx++) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
struct thread_info *t = current_thread_info();
struct mm_struct *mm;
+ if (test_ti_thread_flag(t, TIF_ABI_PENDING)) {
+ clear_ti_thread_flag(t, TIF_ABI_PENDING);
+ if (test_ti_thread_flag(t, TIF_32BIT))
+ clear_ti_thread_flag(t, TIF_32BIT);
+ else
+ set_ti_thread_flag(t, TIF_32BIT);
+ }
+
mm = t->task->mm;
if (mm)
tsb_context_switch(mm);
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
- if ((flags & MAP_SHARED) &&
- ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
return -EINVAL;
return addr;
}
}
}
+asmlinkage unsigned long sparc_brk(unsigned long brk)
+{
+ if(ARCH_SUN4C) {
+ if ((brk & 0xe0000000) != (current->mm->brk & 0xe0000000))
+ return current->mm->brk;
+ }
+ return sys_brk(brk);
+}
+
/*
* sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way unix traditionally does this, though.
}
/* Linux version of mmap */
+static unsigned long do_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags, unsigned long fd,
+ unsigned long pgoff)
+{
+ struct file * file = NULL;
+ unsigned long retval = -EBADF;
+
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
+ len = PAGE_ALIGN(len);
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+ down_write(&current->mm->mmap_sem);
+ retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+out:
+ return retval;
+}
asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long fd,
{
/* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
we have. */
- return sys_mmap_pgoff(addr, len, prot, flags, fd,
- pgoff >> (PAGE_SHIFT - 12));
+ return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12));
}
asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long fd,
unsigned long off)
{
- /* no alignment check? */
- return sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
+ return do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
}
long sparc_remap_file_pages(unsigned long start, unsigned long size,
(pgoff >> (PAGE_SHIFT - 12)), flags);
}
+extern unsigned long do_mremap(unsigned long addr,
+ unsigned long old_len, unsigned long new_len,
+ unsigned long flags, unsigned long new_addr);
+
+asmlinkage unsigned long sparc_mremap(unsigned long addr,
+ unsigned long old_len, unsigned long new_len,
+ unsigned long flags, unsigned long new_addr)
+{
+ unsigned long ret = -EINVAL;
+
+ if (unlikely(sparc_mmap_check(addr, old_len)))
+ goto out;
+ if (unlikely(sparc_mmap_check(new_addr, new_len)))
+ goto out;
+ down_write(&current->mm->mmap_sem);
+ ret = do_mremap(addr, old_len, new_len, flags, new_addr);
+ up_write(&current->mm->mmap_sem);
+out:
+ return ret;
+}
+
/* we come to here via sys_nis_syscall so it can setup the regs argument */
asmlinkage unsigned long
c_sys_nis_syscall (struct pt_regs *regs)
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
unsigned long align_goal, addr = -ENOMEM;
- unsigned long (*get_area)(struct file *, unsigned long,
- unsigned long, unsigned long, unsigned long);
-
- get_area = current->mm->get_unmapped_area;
if (flags & MAP_FIXED) {
/* Ok, don't mess with it. */
- return get_area(NULL, orig_addr, len, pgoff, flags);
+ return get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
}
flags &= ~MAP_SHARED;
align_goal = (64UL * 1024);
do {
- addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
+ addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
if (!(addr & ~PAGE_MASK)) {
addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
break;
* be obtained.
*/
if (addr & ~PAGE_MASK)
- addr = get_area(NULL, orig_addr, len, pgoff, flags);
+ addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
return addr;
}
}
}
+SYSCALL_DEFINE1(sparc_brk, unsigned long, brk)
+{
+ /* People could try to be nasty and use ta 0x6d in 32bit programs */
+ if (test_thread_flag(TIF_32BIT) && brk >= STACK_TOP32)
+ return current->mm->brk;
+
+ if (unlikely(straddles_64bit_va_hole(current->mm->brk, brk)))
+ return current->mm->brk;
+
+ return sys_brk(brk);
+}
+
/*
* sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way unix traditionally does this, though.
unsigned long, prot, unsigned long, flags, unsigned long, fd,
unsigned long, off)
{
- unsigned long retval = -EINVAL;
+ struct file * file = NULL;
+ unsigned long retval = -EBADF;
- if ((off + PAGE_ALIGN(len)) < off)
- goto out;
- if (off & ~PAGE_MASK)
- goto out;
- retval = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ len = PAGE_ALIGN(len);
+
+ down_write(&current->mm->mmap_sem);
+ retval = do_mmap(file, addr, len, prot, flags, off);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
out:
return retval;
}
if (test_thread_flag(TIF_32BIT))
goto out;
+ if (unlikely(new_len >= VA_EXCLUDE_START))
+ goto out;
+ if (unlikely(sparc_mmap_check(addr, old_len)))
+ goto out;
+ if (unlikely(sparc_mmap_check(new_addr, new_len)))
+ goto out;
	down_write(&current->mm->mmap_sem);
ret = do_mremap(addr, old_len, new_len, flags, new_addr);
struct new_utsname;
extern asmlinkage unsigned long sys_getpagesize(void);
+extern asmlinkage unsigned long sparc_brk(unsigned long brk);
extern asmlinkage long sparc_pipe(struct pt_regs *regs);
extern asmlinkage long sys_ipc(unsigned int call, int first,
unsigned long second,
/*0*/ .long sys_restart_syscall, sys_exit, sys_fork, sys_read, sys_write
/*5*/ .long sys_open, sys_close, sys_wait4, sys_creat, sys_link
/*10*/ .long sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod
-/*15*/ .long sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, sys_lseek
+/*15*/ .long sys_chmod, sys_lchown16, sparc_brk, sys_nis_syscall, sys_lseek
/*20*/ .long sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
/*25*/ .long sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_pause
/*30*/ .long sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice
/*235*/ .long sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
/*240*/ .long sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
/*245*/ .long sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
-/*250*/ .long sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
+/*250*/ .long sparc_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
/*255*/ .long sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
/*260*/ .long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
/*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy
/*0*/ .word sys_restart_syscall, sys32_exit, sys_fork, sys_read, sys_write
/*5*/ .word sys32_open, sys_close, sys32_wait4, sys32_creat, sys_link
/*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys32_mknod
-/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys32_perfctr, sys32_lseek
+/*15*/ .word sys_chmod, sys_lchown16, sys_sparc_brk, sys32_perfctr, sys32_lseek
/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
/*25*/ .word sys32_vmsplice, compat_sys_ptrace, sys_alarm, sys32_sigaltstack, sys_pause
/*30*/ .word compat_sys_utime, sys_lchown, sys_fchown, sys32_access, sys32_nice
/*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write
/*5*/ .word sys_open, sys_close, sys_wait4, sys_creat, sys_link
/*10*/ .word sys_unlink, sys_nis_syscall, sys_chdir, sys_chown, sys_mknod
-/*15*/ .word sys_chmod, sys_lchown, sys_brk, sys_perfctr, sys_lseek
+/*15*/ .word sys_chmod, sys_lchown, sys_sparc_brk, sys_perfctr, sys_lseek
/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid
/*25*/ .word sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall
/*30*/ .word sys_utime, sys_nis_syscall, sys_nis_syscall, sys_access, sys_nice
2: sethi %hi(softirq_stack), %g3
or %g3, %lo(softirq_stack), %g3
ldx [%g3 + %g1], %g7
- sub %g7, STACK_BIAS, %g7
cmp %sp, %g7
- bleu,pt %xcc, 3f
+ bleu,pt %xcc, 2f
sethi %hi(THREAD_SIZE), %g3
add %g7, %g3, %g7
cmp %sp, %g7
* again, we are already trying to output the stack overflow
* message.
*/
-3: sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough
+ sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough
or %g7, %lo(ovstack), %g7
add %g7, OVSTACKSIZE, %g3
sub %g3, STACK_BIAS + 192, %g3
#include "linux/mm.h"
#include "linux/sched.h"
#include "linux/utsname.h"
-#include "linux/syscalls.h"
#include "asm/current.h"
#include "asm/mman.h"
#include "asm/uaccess.h"
return ret;
}
+/* common code for old and new mmaps */
+long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ long error = -EBADF;
+ struct file * file = NULL;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
+	down_write(&current->mm->mmap_sem);
+	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+	up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+ out:
+ return error;
+}
+
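For context, sys_mmap2 differs from old_mmap below only in taking the final
argument in page-sized units rather than bytes. A hypothetical userspace
invocation (assuming <sys/syscall.h> defines __NR_mmap2 on the target and a
4096-byte unit, as on typical 32-bit platforms):

	#include <sys/mman.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Map one read-only page of fd starting at byte offset off. */
	static void *map_one_page(int fd, unsigned long off)
	{
		return (void *)syscall(__NR_mmap2, NULL, 4096,
				       PROT_READ, MAP_PRIVATE, fd, off / 4096);
	}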
long old_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long offset)
if (offset & ~PAGE_MASK)
goto out;
- err = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+ err = sys_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
out:
return err;
}
#define EXECUTE_SYSCALL(syscall, regs) \
((long (*)(struct syscall_args)) \
	 (*sys_call_table[syscall]))(SYSCALL_ARGS(&regs->regs))
+
+extern long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff);
with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
/dev/cpu/31/cpuid.
+config X86_CPU_DEBUG
+ tristate "/sys/kernel/debug/x86/cpu/* - CPU Debug support"
+ ---help---
+	  If you select this option, various x86 CPU information will be
+	  provided through debugfs.
+
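Once the option is enabled and debugfs is mounted (conventionally at
/sys/kernel/debug), per-CPU register files appear under x86/cpu/cpu<N>/.
A hypothetical userspace read, assuming that mount point (the exact file
path is illustrative and depends on the CPU's supported registers):

	#include <stdio.h>

	/* Dump one register file for cpu0. */
	int main(void)
	{
		char line[256];
		FILE *f = fopen("/sys/kernel/debug/x86/cpu/cpu0/apic/0x1b/value", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}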
choice
prompt "High Memory Support"
default HIGHMEM4G if !X86_NUMAQ
config X86_CMPXCHG64
def_bool y
- depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
+ depends on !M386 && !M486
# this should be set for all -march=.. options where the compiler
# generates cmov.
# cpu entries
cflags-$(CONFIG_X86_GENERIC) += $(call tune,generic,$(call tune,i686))
-# Work around the pentium-mmx code generator madness of gcc4.4.x which
-# does stack alignment by generating horrible code _before_ the mcount
-# prologue (push %ebp, mov %esp, %ebp) which breaks the function graph
-# tracer assumptions. For i686, generic, core2 this is set by the
-# compiler anyway
-cflags-$(CONFIG_FUNCTION_GRAPH_TRACER) += $(call cc-option,-maccumulate-outgoing-args)
-
# Bug fix for binutils: this option is required in order to keep
# binutils from generating NOPL instructions against our will.
ifneq ($(CONFIG_X86_P6_NOP),y)
if (retval)
return retval;
- /* OK, This is the point of no return */
- set_personality(PER_LINUX);
- set_thread_flag(TIF_IA32);
-
- setup_new_exec(bprm);
-
regs->cs = __USER32_CS;
regs->r8 = regs->r9 = regs->r10 = regs->r11 = regs->r12 =
regs->r13 = regs->r14 = regs->r15 = 0;
+ /* OK, This is the point of no return */
+ set_personality(PER_LINUX);
+ set_thread_flag(TIF_IA32);
+ clear_thread_flag(TIF_ABI_PENDING);
+
current->mm->end_code = ex.a_text +
(current->mm->start_code = N_TXTADDR(ex));
current->mm->end_data = ex.a_data +
.quad quiet_ni_syscall /* streams2 */
.quad stub32_vfork /* 190 */
.quad compat_sys_getrlimit
- .quad sys_mmap_pgoff
+ .quad sys32_mmap2
.quad sys32_truncate64
.quad sys32_ftruncate64
.quad sys32_stat64 /* 195 */
asmlinkage long sys32_mmap(struct mmap_arg_struct __user *arg)
{
struct mmap_arg_struct a;
+ struct file *file = NULL;
+ unsigned long retval;
+	struct mm_struct *mm;
if (copy_from_user(&a, arg, sizeof(a)))
return -EFAULT;
if (a.offset & ~PAGE_MASK)
return -EINVAL;
- return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
+ if (!(a.flags & MAP_ANONYMOUS)) {
+ file = fget(a.fd);
+ if (!file)
+ return -EBADF;
+ }
+
+ mm = current->mm;
+ down_write(&mm->mmap_sem);
+ retval = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags,
a.offset>>PAGE_SHIFT);
+ if (file)
+ fput(file);
+
+ up_write(&mm->mmap_sem);
+
+ return retval;
}
asmlinkage long sys32_mprotect(unsigned long start, size_t len,
return ret;
}
+asmlinkage long sys32_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long error;
+ struct file *file = NULL;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ return -EBADF;
+ }
+
+ down_write(&mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&mm->mmap_sem);
+
+ if (file)
+ fput(file);
+ return error;
+}
+
asmlinkage long sys32_olduname(struct oldold_utsname __user *name)
{
char *arch = "x86_64";
extern void amd_iommu_flush_all_devices(void);
extern void amd_iommu_shutdown(void);
extern void amd_iommu_apply_erratum_63(u16 devid);
-extern void amd_iommu_init_api(void);
#else
static inline int amd_iommu_init(void) { return -ENODEV; }
static inline void amd_iommu_detect(void) { }
--- /dev/null
+#ifndef _ASM_X86_CPU_DEBUG_H
+#define _ASM_X86_CPU_DEBUG_H
+
+/*
+ * CPU x86 architecture debug
+ *
+ * Copyright(C) 2009 Jaswinder Singh Rajput
+ */
+
+/* Register flags */
+enum cpu_debug_bit {
+/* Model Specific Registers (MSRs) */
+ CPU_MC_BIT, /* Machine Check */
+ CPU_MONITOR_BIT, /* Monitor */
+ CPU_TIME_BIT, /* Time */
+ CPU_PMC_BIT, /* Performance Monitor */
+ CPU_PLATFORM_BIT, /* Platform */
+ CPU_APIC_BIT, /* APIC */
+ CPU_POWERON_BIT, /* Power-on */
+ CPU_CONTROL_BIT, /* Control */
+ CPU_FEATURES_BIT, /* Features control */
+ CPU_LBRANCH_BIT, /* Last Branch */
+ CPU_BIOS_BIT, /* BIOS */
+ CPU_FREQ_BIT, /* Frequency */
+ CPU_MTTR_BIT, /* MTRR */
+ CPU_PERF_BIT, /* Performance */
+ CPU_CACHE_BIT, /* Cache */
+ CPU_SYSENTER_BIT, /* Sysenter */
+ CPU_THERM_BIT, /* Thermal */
+ CPU_MISC_BIT, /* Miscellaneous */
+ CPU_DEBUG_BIT, /* Debug */
+ CPU_PAT_BIT, /* PAT */
+ CPU_VMX_BIT, /* VMX */
+ CPU_CALL_BIT, /* System Call */
+ CPU_BASE_BIT, /* BASE Address */
+ CPU_VER_BIT, /* Version ID */
+ CPU_CONF_BIT, /* Configuration */
+ CPU_SMM_BIT, /* System mgmt mode */
+	CPU_SVM_BIT,				/* Secure Virtual Machine */
+	CPU_OSVM_BIT,				/* OS-Visible Workaround */
+/* Standard Registers */
+ CPU_TSS_BIT, /* Task Stack Segment */
+ CPU_CR_BIT, /* Control Registers */
+ CPU_DT_BIT, /* Descriptor Table */
+/* End of Registers flags */
+ CPU_REG_ALL_BIT, /* Select all Registers */
+};
+
+#define CPU_REG_ALL (~0) /* Select all Registers */
+
+#define CPU_MC (1 << CPU_MC_BIT)
+#define CPU_MONITOR (1 << CPU_MONITOR_BIT)
+#define CPU_TIME (1 << CPU_TIME_BIT)
+#define CPU_PMC (1 << CPU_PMC_BIT)
+#define CPU_PLATFORM (1 << CPU_PLATFORM_BIT)
+#define CPU_APIC (1 << CPU_APIC_BIT)
+#define CPU_POWERON (1 << CPU_POWERON_BIT)
+#define CPU_CONTROL (1 << CPU_CONTROL_BIT)
+#define CPU_FEATURES (1 << CPU_FEATURES_BIT)
+#define CPU_LBRANCH (1 << CPU_LBRANCH_BIT)
+#define CPU_BIOS (1 << CPU_BIOS_BIT)
+#define CPU_FREQ (1 << CPU_FREQ_BIT)
+#define CPU_MTRR (1 << CPU_MTTR_BIT)
+#define CPU_PERF (1 << CPU_PERF_BIT)
+#define CPU_CACHE (1 << CPU_CACHE_BIT)
+#define CPU_SYSENTER (1 << CPU_SYSENTER_BIT)
+#define CPU_THERM (1 << CPU_THERM_BIT)
+#define CPU_MISC (1 << CPU_MISC_BIT)
+#define CPU_DEBUG (1 << CPU_DEBUG_BIT)
+#define CPU_PAT (1 << CPU_PAT_BIT)
+#define CPU_VMX (1 << CPU_VMX_BIT)
+#define CPU_CALL (1 << CPU_CALL_BIT)
+#define CPU_BASE (1 << CPU_BASE_BIT)
+#define CPU_VER (1 << CPU_VER_BIT)
+#define CPU_CONF (1 << CPU_CONF_BIT)
+#define CPU_SMM (1 << CPU_SMM_BIT)
+#define CPU_SVM (1 << CPU_SVM_BIT)
+#define CPU_OSVM (1 << CPU_OSVM_BIT)
+#define CPU_TSS (1 << CPU_TSS_BIT)
+#define CPU_CR (1 << CPU_CR_BIT)
+#define CPU_DT (1 << CPU_DT_BIT)
+
+/* Register file flags */
+enum cpu_file_bit {
+ CPU_INDEX_BIT, /* index */
+ CPU_VALUE_BIT, /* value */
+};
+
+#define CPU_FILE_VALUE (1 << CPU_VALUE_BIT)
+
+#define MAX_CPU_FILES 512
+
+struct cpu_private {
+ unsigned cpu;
+ unsigned type;
+ unsigned reg;
+ unsigned file;
+};
+
+struct cpu_debug_base {
+ char *name; /* Register name */
+ unsigned flag; /* Register flag */
+ unsigned write; /* Register write flag */
+};
+
+/*
+ * Currently it looks similar to cpu_debug_base but once we add more files
+ * cpu_file_base will go in a different direction
+ */
+struct cpu_file_base {
+ char *name; /* Register file name */
+ unsigned flag; /* Register file flag */
+ unsigned write; /* Register write flag */
+};
+
+struct cpu_cpuX_base {
+ struct dentry *dentry; /* Register dentry */
+ int init; /* Register index file */
+};
+
+struct cpu_debug_range {
+ unsigned min; /* Register range min */
+ unsigned max; /* Register range max */
+ unsigned flag; /* Supported flags */
+};
+
+#endif /* _ASM_X86_CPU_DEBUG_H */
set_fs(USER_DS); \
} while (0)
-void set_personality_ia32(void);
-#define COMPAT_SET_PERSONALITY(ex) set_personality_ia32()
+#define COMPAT_SET_PERSONALITY(ex) \
+do { \
+ if (test_thread_flag(TIF_IA32)) \
+ clear_thread_flag(TIF_ABI_PENDING); \
+ else \
+ set_thread_flag(TIF_ABI_PENDING); \
+ current->personality |= force_personality32; \
+} while (0)
#define COMPAT_ELF_PLATFORM ("i686")
extern unsigned long hpet_address;
extern unsigned long force_hpet_address;
extern int hpet_force_user;
-extern u8 hpet_msi_disable;
extern int is_hpet_enabled(void);
extern int hpet_enable(void);
extern void hpet_disable(void);
*/
#define LOCAL_PENDING_VECTOR 0xec
-#define UV_BAU_MESSAGE 0xea
+#define UV_BAU_MESSAGE 0xec
/*
* Self IPI vector for machine checks
u8 seg_override;
unsigned int d;
unsigned long regs[NR_VCPU_REGS];
- unsigned long eip, eip_orig;
+ unsigned long eip;
/* modrm */
u8 modrm;
u8 modrm_mod;
unsigned long irq_sources_bitmap;
unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
u64 vm_init_tsc;
- s64 kvmclock_offset;
};
struct kvm_vm_stat {
void mce_log_therm_throt_event(__u64 status);
-#ifdef CONFIG_X86_THERMAL_VECTOR
-extern void mcheck_intel_therm_init(void);
-#else
-static inline void mcheck_intel_therm_init(void) { }
-#endif
-
#endif /* __KERNEL__ */
#endif /* _ASM_X86_MCE_H */
};
};
-struct msr_info {
- u32 msr_no;
- struct msr reg;
- struct msr *msrs;
- int err;
-};
-
-struct msr_regs_info {
- u32 *regs;
- int err;
-};
-
static inline unsigned long long native_read_tscp(unsigned int *aux)
{
unsigned long low, high;
#define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)
-struct msr *msrs_alloc(void);
-void msrs_free(struct msr *msrs);
-
#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
-void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
-void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
+void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
+void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
unsigned int *ecx, unsigned int *edx)
{
/* ecx is often an input as well as an output. */
- asm volatile("cpuid"
+ asm("cpuid"
: "=a" (*eax),
"=b" (*ebx),
"=c" (*ecx),
asmlinkage long sys32_personality(unsigned long);
asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32);
+asmlinkage long sys32_mmap2(unsigned long, unsigned long, unsigned long,
+ unsigned long, unsigned long, unsigned long);
+
struct oldold_utsname;
struct old_utsname;
asmlinkage long sys32_olduname(struct oldold_utsname __user *);
struct oldold_utsname;
struct old_utsname;
+asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
+ unsigned long, unsigned long, unsigned long);
asmlinkage int old_mmap(struct mmap_arg_struct __user *);
asmlinkage int old_select(struct sel_arg_struct __user *);
asmlinkage int sys_ipc(uint, int, int, int, void __user *, long);
#define TIF_NOTSC 16 /* TSC is not accessible in userland */
#define TIF_IA32 17 /* 32bit process */
#define TIF_FORK 18 /* ret_from_fork */
+#define TIF_ABI_PENDING 19
#define TIF_MEMDIE 20
#define TIF_DEBUG 21 /* uses debug registers */
#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
#define _TIF_NOTSC (1 << TIF_NOTSC)
#define _TIF_IA32 (1 << TIF_IA32)
#define _TIF_FORK (1 << TIF_FORK)
+#define _TIF_ABI_PENDING (1 << TIF_ABI_PENDING)
#define _TIF_DEBUG (1 << TIF_DEBUG)
#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
#define _TIF_FREEZE (1 << TIF_FREEZE)
* contiguous (although various IO spaces may punch holes in
* it)..
*
- * N - Number of bits in the node portion of a socket physical
- * address.
+ * N - Number of bits in the node portion of a socket physical
+ * address.
*
- * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of
- * routers always have low bit of 1, C/MBricks have low bit
- * equal to 0. Most addressing macros that target UV hub chips
- * right shift the NASID by 1 to exclude the always-zero bit.
- * NASIDs contain up to 15 bits.
+ * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of
+ * routers always have low bit of 1, C/MBricks have low bit
+ * equal to 0. Most addressing macros that target UV hub chips
+ * right shift the NASID by 1 to exclude the always-zero bit.
+ * NASIDs contain up to 15 bits.
*
* GNODE - NASID right shifted by 1 bit. Most mmrs contain gnodes instead
* of nasids.
*
- * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant
- * of the nasid for socket usage.
+ * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant
+ * of the nasid for socket usage.
*
*
* NumaLink Global Physical Address Format:
*
*
* APICID format
- * NOTE!!!!!! This is the current format of the APICID. However, code
- * should assume that this will change in the future. Use functions
- * in this file for all APICID bit manipulations and conversion.
+ * NOTE!!!!!! This is the current format of the APICID. However, code
+ * should assume that this will change in the future. Use functions
+ * in this file for all APICID bit manipulations and conversion.
*
- * 1111110000000000
- * 5432109876543210
+ * 1111110000000000
+ * 5432109876543210
* pppppppppplc0cch
* sssssssssss
*
* Note: Processor only supports 12 bits in the APICID register. The ACPI
* tables hold all 16 bits. Software needs to be aware of this.
*
- * Unless otherwise specified, all references to APICID refer to
- * the FULL value contained in ACPI tables, not the subset in the
- * processor APICID register.
+ * Unless otherwise specified, all references to APICID refer to
+ * the FULL value contained in ACPI tables, not the subset in the
+ * processor APICID register.
*/
};
DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
-#define uv_hub_info (&__get_cpu_var(__uv_hub_info))
+#define uv_hub_info (&__get_cpu_var(__uv_hub_info))
#define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu))
/*
* Local & Global MMR space macros.
- * Note: macros are intended to be used ONLY by inline functions
- * in this file - not by other kernel code.
- * n - NASID (full 15-bit global nasid)
- * g - GNODE (full 15-bit global nasid, right shifted 1)
- * p - PNODE (local part of nsids, right shifted 1)
+ * Note: macros are intended to be used ONLY by inline functions
+ * in this file - not by other kernel code.
+ * n - NASID (full 15-bit global nasid)
+ * g - GNODE (full 15-bit global nasid, right shifted 1)
+ * p - PNODE (local part of nsids, right shifted 1)
*/
#define UV_NASID_TO_PNODE(n) (((n) >> 1) & uv_hub_info->pnode_mask)
#define UV_PNODE_TO_GNODE(p) ((p) |uv_hub_info->gnode_extra)
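As a worked example of the conversions above (values illustrative): a C-brick
NASID of 0x6 has its always-zero low bit shifted away, giving GNODE 0x3; with
a pnode_mask of 0x3f, UV_NASID_TO_PNODE(0x6) is likewise 0x3, and
UV_PNODE_TO_GNODE() ORs gnode_extra back in for hubs whose gnode space is
offset.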
/*
* Macros for converting between kernel virtual addresses, socket local physical
* addresses, and UV global physical addresses.
- * Note: use the standard __pa() & __va() macros for converting
- * between socket virtual and socket physical addresses.
+ * Note: use the standard __pa() & __va() macros for converting
+ * between socket virtual and socket physical addresses.
*/
/* socket phys RAM --> UV global physical address */
* Access global MMRs using the low memory MMR32 space. This region supports
* faster MMR access but not all MMRs are accessible in this space.
*/
-static inline unsigned long *uv_global_mmr32_address(int pnode, unsigned long offset)
+static inline unsigned long *uv_global_mmr32_address(int pnode,
+ unsigned long offset)
{
return __va(UV_GLOBAL_MMR32_BASE |
UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset);
}
-static inline void uv_write_global_mmr32(int pnode, unsigned long offset, unsigned long val)
+static inline void uv_write_global_mmr32(int pnode, unsigned long offset,
+ unsigned long val)
{
writeq(val, uv_global_mmr32_address(pnode, offset));
}
-static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset)
+static inline unsigned long uv_read_global_mmr32(int pnode,
+ unsigned long offset)
{
return readq(uv_global_mmr32_address(pnode, offset));
}
* Access Global MMR space using the MMR space located at the top of physical
* memory.
*/
-static inline unsigned long *uv_global_mmr64_address(int pnode, unsigned long offset)
+static inline unsigned long *uv_global_mmr64_address(int pnode,
+ unsigned long offset)
{
return __va(UV_GLOBAL_MMR64_BASE |
UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
}
-static inline void uv_write_global_mmr64(int pnode, unsigned long offset, unsigned long val)
+static inline void uv_write_global_mmr64(int pnode, unsigned long offset,
+ unsigned long val)
{
writeq(val, uv_global_mmr64_address(pnode, offset));
}
-static inline unsigned long uv_read_global_mmr64(int pnode, unsigned long offset)
+static inline unsigned long uv_read_global_mmr64(int pnode,
+ unsigned long offset)
{
return readq(uv_global_mmr64_address(pnode, offset));
}
-static inline void uv_write_global_mmr8(int pnode, unsigned long offset, unsigned char val)
-{
- writeb(val, uv_global_mmr64_address(pnode, offset));
-}
-
-static inline unsigned char uv_read_global_mmr8(int pnode, unsigned long offset)
-{
- return readb(uv_global_mmr64_address(pnode, offset));
-}
-
/*
* Access hub local MMRs. Faster than using global space but only local MMRs
* are accessible.
}
}
-static inline unsigned long uv_scir_offset(int apicid)
-{
- return SCIR_LOCAL_MMR_BASE | (apicid & 0x3f);
-}
-
static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
{
if (uv_cpu_hub_info(cpu)->scir.state != value) {
- uv_write_global_mmr8(uv_cpu_to_pnode(cpu),
- uv_cpu_hub_info(cpu)->scir.offset, value);
uv_cpu_hub_info(cpu)->scir.state = value;
+ uv_write_local_mmr8(uv_cpu_hub_info(cpu)->scir.offset, value);
}
}
* P4, Core and beyond CPUs
*/
if (c->x86_vendor == X86_VENDOR_INTEL &&
- (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f)))
+ (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 14)))
flags->bm_control = 0;
}
EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
static void flush_devices_by_domain(struct protection_domain *domain)
{
struct amd_iommu *iommu;
- unsigned long i;
+ int i;
for (i = 0; i <= amd_iommu_last_bdf; ++i) {
if ((domain == NULL && amd_iommu_pd_table[i] == NULL) ||
/*
* If we run in passthrough mode the device must be assigned to the
- * passthrough domain if it is detached from any other domain.
- * Make sure we can deassign from the pt_domain itself.
+ * passthrough domain if it is detached from any other domain
*/
- if (iommu_pass_through && domain != pt_domain) {
+ if (iommu_pass_through) {
struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
__attach_device(iommu, pt_domain, devid);
}
struct pci_dev *dev = NULL;
struct dma_ops_domain *dma_dom;
struct amd_iommu *iommu;
- u16 devid, __devid;
+ u16 devid;
while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
- __devid = devid = calc_devid(dev->bus->number, dev->devfn);
+ devid = calc_devid(dev->bus->number, dev->devfn);
if (devid > amd_iommu_last_bdf)
continue;
devid = amd_iommu_alias_table[devid];
init_unity_mappings_for_device(dma_dom, devid);
dma_dom->target_dev = devid;
- attach_device(iommu, &dma_dom->domain, devid);
- if (__devid != devid)
- attach_device(iommu, &dma_dom->domain, __devid);
-
list_add_tail(&dma_dom->list, &iommu_pd_list);
}
}
.dma_supported = amd_iommu_dma_supported,
};
-void __init amd_iommu_init_api(void)
-{
- register_iommu(&amd_iommu_ops);
-}
-
/*
* The function which clues the AMD IOMMU driver into dma_ops.
*/
/* Make the driver finally visible to the drivers */
dma_ops = &amd_iommu_dma_ops;
+ register_iommu(&amd_iommu_ops);
+
bus_register_notifier(&pci_bus_type, &device_nb);
amd_iommu_stats_init();
LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
system */
-/*
- * Set to true if ACPI table parsing and hardware intialization went properly
- */
-static bool amd_iommu_initialized;
-
/*
* Pointer to the device table which is shared by all AMD IOMMUs
* it is indexed by the PCI device id or the HT unit id and contains
}
WARN_ON(p != end);
- amd_iommu_initialized = true;
-
return 0;
}
*
****************************************************************************/
-static int iommu_setup_msi(struct amd_iommu *iommu)
+static int __init iommu_setup_msi(struct amd_iommu *iommu)
{
int r;
if (acpi_table_parse("IVRS", init_iommu_all) != 0)
goto free;
- if (!amd_iommu_initialized)
- goto free;
-
if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
goto free;
ret = amd_iommu_init_passthrough();
else
ret = amd_iommu_init_dma_ops();
-
if (ret)
goto free;
- amd_iommu_init_api();
-
enable_iommus();
if (iommu_pass_through)
*/
static void native_apic_write_dummy(u32 reg, u32 v)
{
- WARN_ON_ONCE(cpu_has_apic && !disable_apic);
+ WARN_ON_ONCE((cpu_has_apic || !disable_apic));
}
static u32 native_apic_read_dummy(u32 reg)
printk(KERN_DEBUG "system APIC only can use physical flat");
return 1;
}
-
- if (!strncmp(oem_id, "IBM", 3) && !strncmp(oem_table_id, "EXA", 3)) {
- printk(KERN_DEBUG "IBM Summit detected, will use apic physical");
- return 1;
- }
#endif
return 0;
continue;
desc_new = move_irq_desc(desc_new, node);
- cfg_new = desc_new->chip_data;
if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0)
irq = new;
enum map_type {map_wb, map_uc};
-static __init void map_high(char *id, unsigned long base, int pshift,
- int bshift, int max_pnode, enum map_type map_type)
+static __init void map_high(char *id, unsigned long base, int shift,
+ int max_pnode, enum map_type map_type)
{
unsigned long bytes, paddr;
- paddr = base << pshift;
- bytes = (1UL << bshift) * (max_pnode + 1);
+ paddr = base << shift;
+ bytes = (1UL << shift) * (max_pnode + 1);
printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
paddr + bytes);
if (map_type == map_uc)
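As a worked example of the reverted arithmetic (values illustrative): with
shift = 28, base = 0x6 and max_pnode = 3, paddr = 0x6 << 28 = 0x60000000 and
bytes = (1UL << 28) * 4 = 1 GB, i.e. one 256 MB window per pnode.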
gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
if (gru.s.enable)
- map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb);
+ map_high("GRU", gru.s.base, shift, max_pnode, map_wb);
}
static __init void map_mmr_high(int max_pnode)
mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
if (mmr.s.enable)
- map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc);
+ map_high("MMR", mmr.s.base, shift, max_pnode, map_uc);
}
static __init void map_mmioh_high(int max_pnode)
mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
if (mmioh.s.enable)
- map_high("MMIOH", mmioh.s.base, shift, mmioh.s.m_io,
- max_pnode, map_uc);
+ map_high("MMIOH", mmioh.s.base, shift, max_pnode, map_uc);
}
static __init void uv_rtc_init(void)
uv_rtc_init();
for_each_present_cpu(cpu) {
- int apicid = per_cpu(x86_cpu_to_apicid, cpu);
-
nid = cpu_to_node(cpu);
- pnode = uv_apicid_to_pnode(apicid);
+ pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu));
blade = boot_pnode_to_blade(pnode);
lcpu = uv_blade_info[blade].nr_possible_cpus;
uv_blade_info[blade].nr_possible_cpus++;
uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
- uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
+ uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu;
uv_node_to_blade[nid] = blade;
uv_cpu_to_blade[cpu] = blade;
max_pnode = max(pnode, max_pnode);
- printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, lcpu %d, blade %d\n",
- cpu, apicid, pnode, nid, lcpu, blade);
+ printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, "
+ "lcpu %d, blade %d\n",
+ cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid,
+ lcpu, blade);
}
/* Add blade/pnode info for nodes without cpus */
obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o
obj-$(CONFIG_X86_64) += bugs_64.o
+obj-$(CONFIG_X86_CPU_DEBUG) += cpu_debug.o
+
obj-$(CONFIG_CPU_SUP_INTEL) += intel.o
obj-$(CONFIG_CPU_SUP_AMD) += amd.o
obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o
--- /dev/null
+/*
+ * CPU x86 architecture debug code
+ *
+ * Copyright(C) 2009 Jaswinder Singh Rajput
+ *
+ * For licencing details see kernel-base/COPYING
+ */
+
+#include <linux/interrupt.h>
+#include <linux/compiler.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/kprobes.h>
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+
+#include <asm/cpu_debug.h>
+#include <asm/paravirt.h>
+#include <asm/system.h>
+#include <asm/traps.h>
+#include <asm/apic.h>
+#include <asm/desc.h>
+
+static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr);
+static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr);
+static DEFINE_PER_CPU(int, cpu_priv_count);
+
+static DEFINE_MUTEX(cpu_debug_lock);
+
+static struct dentry *cpu_debugfs_dir;
+
+static struct cpu_debug_base cpu_base[] = {
+ { "mc", CPU_MC, 0 },
+ { "monitor", CPU_MONITOR, 0 },
+ { "time", CPU_TIME, 0 },
+ { "pmc", CPU_PMC, 1 },
+ { "platform", CPU_PLATFORM, 0 },
+ { "apic", CPU_APIC, 0 },
+ { "poweron", CPU_POWERON, 0 },
+ { "control", CPU_CONTROL, 0 },
+ { "features", CPU_FEATURES, 0 },
+ { "lastbranch", CPU_LBRANCH, 0 },
+ { "bios", CPU_BIOS, 0 },
+ { "freq", CPU_FREQ, 0 },
+ { "mtrr", CPU_MTRR, 0 },
+ { "perf", CPU_PERF, 0 },
+ { "cache", CPU_CACHE, 0 },
+ { "sysenter", CPU_SYSENTER, 0 },
+ { "therm", CPU_THERM, 0 },
+ { "misc", CPU_MISC, 0 },
+ { "debug", CPU_DEBUG, 0 },
+ { "pat", CPU_PAT, 0 },
+ { "vmx", CPU_VMX, 0 },
+ { "call", CPU_CALL, 0 },
+ { "base", CPU_BASE, 0 },
+ { "ver", CPU_VER, 0 },
+ { "conf", CPU_CONF, 0 },
+ { "smm", CPU_SMM, 0 },
+ { "svm", CPU_SVM, 0 },
+ { "osvm", CPU_OSVM, 0 },
+ { "tss", CPU_TSS, 0 },
+ { "cr", CPU_CR, 0 },
+ { "dt", CPU_DT, 0 },
+ { "registers", CPU_REG_ALL, 0 },
+};
+
+static struct cpu_file_base cpu_file[] = {
+ { "index", CPU_REG_ALL, 0 },
+ { "value", CPU_REG_ALL, 1 },
+};
+
+/* CPU Registers Range */
+static struct cpu_debug_range cpu_reg_range[] = {
+ { 0x00000000, 0x00000001, CPU_MC, },
+ { 0x00000006, 0x00000007, CPU_MONITOR, },
+ { 0x00000010, 0x00000010, CPU_TIME, },
+ { 0x00000011, 0x00000013, CPU_PMC, },
+ { 0x00000017, 0x00000017, CPU_PLATFORM, },
+ { 0x0000001B, 0x0000001B, CPU_APIC, },
+ { 0x0000002A, 0x0000002B, CPU_POWERON, },
+ { 0x0000002C, 0x0000002C, CPU_FREQ, },
+ { 0x0000003A, 0x0000003A, CPU_CONTROL, },
+ { 0x00000040, 0x00000047, CPU_LBRANCH, },
+ { 0x00000060, 0x00000067, CPU_LBRANCH, },
+ { 0x00000079, 0x00000079, CPU_BIOS, },
+ { 0x00000088, 0x0000008A, CPU_CACHE, },
+ { 0x0000008B, 0x0000008B, CPU_BIOS, },
+ { 0x0000009B, 0x0000009B, CPU_MONITOR, },
+ { 0x000000C1, 0x000000C4, CPU_PMC, },
+ { 0x000000CD, 0x000000CD, CPU_FREQ, },
+ { 0x000000E7, 0x000000E8, CPU_PERF, },
+ { 0x000000FE, 0x000000FE, CPU_MTRR, },
+
+ { 0x00000116, 0x0000011E, CPU_CACHE, },
+ { 0x00000174, 0x00000176, CPU_SYSENTER, },
+ { 0x00000179, 0x0000017B, CPU_MC, },
+ { 0x00000186, 0x00000189, CPU_PMC, },
+ { 0x00000198, 0x00000199, CPU_PERF, },
+ { 0x0000019A, 0x0000019A, CPU_TIME, },
+ { 0x0000019B, 0x0000019D, CPU_THERM, },
+ { 0x000001A0, 0x000001A0, CPU_MISC, },
+ { 0x000001C9, 0x000001C9, CPU_LBRANCH, },
+ { 0x000001D7, 0x000001D8, CPU_LBRANCH, },
+ { 0x000001D9, 0x000001D9, CPU_DEBUG, },
+ { 0x000001DA, 0x000001E0, CPU_LBRANCH, },
+
+ { 0x00000200, 0x0000020F, CPU_MTRR, },
+ { 0x00000250, 0x00000250, CPU_MTRR, },
+ { 0x00000258, 0x00000259, CPU_MTRR, },
+ { 0x00000268, 0x0000026F, CPU_MTRR, },
+ { 0x00000277, 0x00000277, CPU_PAT, },
+ { 0x000002FF, 0x000002FF, CPU_MTRR, },
+
+ { 0x00000300, 0x00000311, CPU_PMC, },
+ { 0x00000345, 0x00000345, CPU_PMC, },
+ { 0x00000360, 0x00000371, CPU_PMC, },
+ { 0x0000038D, 0x00000390, CPU_PMC, },
+ { 0x000003A0, 0x000003BE, CPU_PMC, },
+ { 0x000003C0, 0x000003CD, CPU_PMC, },
+ { 0x000003E0, 0x000003E1, CPU_PMC, },
+ { 0x000003F0, 0x000003F2, CPU_PMC, },
+
+ { 0x00000400, 0x00000417, CPU_MC, },
+ { 0x00000480, 0x0000048B, CPU_VMX, },
+
+ { 0x00000600, 0x00000600, CPU_DEBUG, },
+ { 0x00000680, 0x0000068F, CPU_LBRANCH, },
+ { 0x000006C0, 0x000006CF, CPU_LBRANCH, },
+
+ { 0x000107CC, 0x000107D3, CPU_PMC, },
+
+ { 0xC0000080, 0xC0000080, CPU_FEATURES, },
+ { 0xC0000081, 0xC0000084, CPU_CALL, },
+ { 0xC0000100, 0xC0000102, CPU_BASE, },
+ { 0xC0000103, 0xC0000103, CPU_TIME, },
+
+ { 0xC0010000, 0xC0010007, CPU_PMC, },
+ { 0xC0010010, 0xC0010010, CPU_CONF, },
+ { 0xC0010015, 0xC0010015, CPU_CONF, },
+ { 0xC0010016, 0xC001001A, CPU_MTRR, },
+ { 0xC001001D, 0xC001001D, CPU_MTRR, },
+ { 0xC001001F, 0xC001001F, CPU_CONF, },
+ { 0xC0010030, 0xC0010035, CPU_BIOS, },
+ { 0xC0010044, 0xC0010048, CPU_MC, },
+ { 0xC0010050, 0xC0010056, CPU_SMM, },
+ { 0xC0010058, 0xC0010058, CPU_CONF, },
+ { 0xC0010060, 0xC0010060, CPU_CACHE, },
+ { 0xC0010061, 0xC0010068, CPU_SMM, },
+ { 0xC0010069, 0xC001006B, CPU_SMM, },
+ { 0xC0010070, 0xC0010071, CPU_SMM, },
+ { 0xC0010111, 0xC0010113, CPU_SMM, },
+ { 0xC0010114, 0xC0010118, CPU_SVM, },
+ { 0xC0010140, 0xC0010141, CPU_OSVM, },
+ { 0xC0011022, 0xC0011023, CPU_CONF, },
+};
+
+static int is_typeflag_valid(unsigned cpu, unsigned flag)
+{
+ int i;
+
+ /* Standard Registers should be always valid */
+ if (flag >= CPU_TSS)
+ return 1;
+
+ for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) {
+ if (cpu_reg_range[i].flag == flag)
+ return 1;
+ }
+
+ /* Invalid */
+ return 0;
+}
+
+static unsigned get_cpu_range(unsigned cpu, unsigned *min, unsigned *max,
+ int index, unsigned flag)
+{
+ if (cpu_reg_range[index].flag == flag) {
+ *min = cpu_reg_range[index].min;
+ *max = cpu_reg_range[index].max;
+ } else
+ *max = 0;
+
+ return *max;
+}
+
+/* This function can also be called with seq = NULL for printk */
+static void print_cpu_data(struct seq_file *seq, unsigned type,
+ u32 low, u32 high)
+{
+ struct cpu_private *priv;
+ u64 val = high;
+
+ if (seq) {
+ priv = seq->private;
+ if (priv->file) {
+ val = (val << 32) | low;
+ seq_printf(seq, "0x%llx\n", val);
+ } else
+ seq_printf(seq, " %08x: %08x_%08x\n",
+ type, high, low);
+ } else
+ printk(KERN_INFO " %08x: %08x_%08x\n", type, high, low);
+}
+
+/* This function can also be called with seq = NULL for printk */
+static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag)
+{
+ unsigned msr, msr_min, msr_max;
+ struct cpu_private *priv;
+ u32 low, high;
+ int i;
+
+ if (seq) {
+ priv = seq->private;
+ if (priv->file) {
+ if (!rdmsr_safe_on_cpu(priv->cpu, priv->reg,
+ &low, &high))
+ print_cpu_data(seq, priv->reg, low, high);
+ return;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) {
+ if (!get_cpu_range(cpu, &msr_min, &msr_max, i, flag))
+ continue;
+
+ for (msr = msr_min; msr <= msr_max; msr++) {
+ if (rdmsr_safe_on_cpu(cpu, msr, &low, &high))
+ continue;
+ print_cpu_data(seq, msr, low, high);
+ }
+ }
+}
+
+static void print_tss(void *arg)
+{
+ struct pt_regs *regs = task_pt_regs(current);
+ struct seq_file *seq = arg;
+ unsigned int seg;
+
+ seq_printf(seq, " RAX\t: %016lx\n", regs->ax);
+ seq_printf(seq, " RBX\t: %016lx\n", regs->bx);
+ seq_printf(seq, " RCX\t: %016lx\n", regs->cx);
+ seq_printf(seq, " RDX\t: %016lx\n", regs->dx);
+
+ seq_printf(seq, " RSI\t: %016lx\n", regs->si);
+ seq_printf(seq, " RDI\t: %016lx\n", regs->di);
+ seq_printf(seq, " RBP\t: %016lx\n", regs->bp);
+ seq_printf(seq, " ESP\t: %016lx\n", regs->sp);
+
+#ifdef CONFIG_X86_64
+ seq_printf(seq, " R08\t: %016lx\n", regs->r8);
+ seq_printf(seq, " R09\t: %016lx\n", regs->r9);
+ seq_printf(seq, " R10\t: %016lx\n", regs->r10);
+ seq_printf(seq, " R11\t: %016lx\n", regs->r11);
+ seq_printf(seq, " R12\t: %016lx\n", regs->r12);
+ seq_printf(seq, " R13\t: %016lx\n", regs->r13);
+ seq_printf(seq, " R14\t: %016lx\n", regs->r14);
+ seq_printf(seq, " R15\t: %016lx\n", regs->r15);
+#endif
+
+ asm("movl %%cs,%0" : "=r" (seg));
+ seq_printf(seq, " CS\t: %04x\n", seg);
+ asm("movl %%ds,%0" : "=r" (seg));
+ seq_printf(seq, " DS\t: %04x\n", seg);
+ seq_printf(seq, " SS\t: %04lx\n", regs->ss & 0xffff);
+ asm("movl %%es,%0" : "=r" (seg));
+ seq_printf(seq, " ES\t: %04x\n", seg);
+ asm("movl %%fs,%0" : "=r" (seg));
+ seq_printf(seq, " FS\t: %04x\n", seg);
+ asm("movl %%gs,%0" : "=r" (seg));
+ seq_printf(seq, " GS\t: %04x\n", seg);
+
+ seq_printf(seq, " EFLAGS\t: %016lx\n", regs->flags);
+
+ seq_printf(seq, " EIP\t: %016lx\n", regs->ip);
+}
+
+static void print_cr(void *arg)
+{
+ struct seq_file *seq = arg;
+
+ seq_printf(seq, " cr0\t: %016lx\n", read_cr0());
+ seq_printf(seq, " cr2\t: %016lx\n", read_cr2());
+ seq_printf(seq, " cr3\t: %016lx\n", read_cr3());
+ seq_printf(seq, " cr4\t: %016lx\n", read_cr4_safe());
+#ifdef CONFIG_X86_64
+ seq_printf(seq, " cr8\t: %016lx\n", read_cr8());
+#endif
+}
+
+static void print_desc_ptr(char *str, struct seq_file *seq, struct desc_ptr dt)
+{
+ seq_printf(seq, " %s\t: %016llx\n", str, (u64)(dt.address | dt.size));
+}
+
+static void print_dt(void *seq)
+{
+ struct desc_ptr dt;
+ unsigned long ldt;
+
+ /* IDT */
+ store_idt((struct desc_ptr *)&dt);
+ print_desc_ptr("IDT", seq, dt);
+
+ /* GDT */
+ store_gdt((struct desc_ptr *)&dt);
+ print_desc_ptr("GDT", seq, dt);
+
+ /* LDT */
+ store_ldt(ldt);
+ seq_printf(seq, " LDT\t: %016lx\n", ldt);
+
+ /* TR */
+ store_tr(ldt);
+ seq_printf(seq, " TR\t: %016lx\n", ldt);
+}
+
+static void print_dr(void *arg)
+{
+ struct seq_file *seq = arg;
+ unsigned long dr;
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ /* Ignore db4, db5 */
+ if ((i == 4) || (i == 5))
+ continue;
+ get_debugreg(dr, i);
+ seq_printf(seq, " dr%d\t: %016lx\n", i, dr);
+ }
+
+ seq_printf(seq, "\n MSR\t:\n");
+}
+
+static void print_apic(void *arg)
+{
+ struct seq_file *seq = arg;
+
+#ifdef CONFIG_X86_LOCAL_APIC
+ seq_printf(seq, " LAPIC\t:\n");
+ seq_printf(seq, " ID\t\t: %08x\n", apic_read(APIC_ID) >> 24);
+ seq_printf(seq, " LVR\t\t: %08x\n", apic_read(APIC_LVR));
+ seq_printf(seq, " TASKPRI\t: %08x\n", apic_read(APIC_TASKPRI));
+ seq_printf(seq, " ARBPRI\t\t: %08x\n", apic_read(APIC_ARBPRI));
+ seq_printf(seq, " PROCPRI\t: %08x\n", apic_read(APIC_PROCPRI));
+ seq_printf(seq, " LDR\t\t: %08x\n", apic_read(APIC_LDR));
+ seq_printf(seq, " DFR\t\t: %08x\n", apic_read(APIC_DFR));
+ seq_printf(seq, " SPIV\t\t: %08x\n", apic_read(APIC_SPIV));
+ seq_printf(seq, " ISR\t\t: %08x\n", apic_read(APIC_ISR));
+ seq_printf(seq, " ESR\t\t: %08x\n", apic_read(APIC_ESR));
+ seq_printf(seq, " ICR\t\t: %08x\n", apic_read(APIC_ICR));
+ seq_printf(seq, " ICR2\t\t: %08x\n", apic_read(APIC_ICR2));
+ seq_printf(seq, " LVTT\t\t: %08x\n", apic_read(APIC_LVTT));
+ seq_printf(seq, " LVTTHMR\t: %08x\n", apic_read(APIC_LVTTHMR));
+ seq_printf(seq, " LVTPC\t\t: %08x\n", apic_read(APIC_LVTPC));
+ seq_printf(seq, " LVT0\t\t: %08x\n", apic_read(APIC_LVT0));
+ seq_printf(seq, " LVT1\t\t: %08x\n", apic_read(APIC_LVT1));
+ seq_printf(seq, " LVTERR\t\t: %08x\n", apic_read(APIC_LVTERR));
+ seq_printf(seq, " TMICT\t\t: %08x\n", apic_read(APIC_TMICT));
+ seq_printf(seq, " TMCCT\t\t: %08x\n", apic_read(APIC_TMCCT));
+ seq_printf(seq, " TDCR\t\t: %08x\n", apic_read(APIC_TDCR));
+ if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
+ unsigned int i, v, maxeilvt;
+
+ v = apic_read(APIC_EFEAT);
+ maxeilvt = (v >> 16) & 0xff;
+ seq_printf(seq, " EFEAT\t\t: %08x\n", v);
+ seq_printf(seq, " ECTRL\t\t: %08x\n", apic_read(APIC_ECTRL));
+
+ for (i = 0; i < maxeilvt; i++) {
+ v = apic_read(APIC_EILVTn(i));
+ seq_printf(seq, " EILVT%d\t\t: %08x\n", i, v);
+ }
+ }
+#endif /* CONFIG_X86_LOCAL_APIC */
+ seq_printf(seq, "\n MSR\t:\n");
+}
+
+static int cpu_seq_show(struct seq_file *seq, void *v)
+{
+ struct cpu_private *priv = seq->private;
+
+ if (priv == NULL)
+ return -EINVAL;
+
+ switch (cpu_base[priv->type].flag) {
+ case CPU_TSS:
+ smp_call_function_single(priv->cpu, print_tss, seq, 1);
+ break;
+ case CPU_CR:
+ smp_call_function_single(priv->cpu, print_cr, seq, 1);
+ break;
+ case CPU_DT:
+ smp_call_function_single(priv->cpu, print_dt, seq, 1);
+ break;
+ case CPU_DEBUG:
+ if (priv->file == CPU_INDEX_BIT)
+ smp_call_function_single(priv->cpu, print_dr, seq, 1);
+ print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
+ break;
+ case CPU_APIC:
+ if (priv->file == CPU_INDEX_BIT)
+ smp_call_function_single(priv->cpu, print_apic, seq, 1);
+ print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
+ break;
+
+ default:
+ print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
+ break;
+ }
+ seq_printf(seq, "\n");
+
+ return 0;
+}
+
+static void *cpu_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ if (*pos == 0) /* One time is enough ;-) */
+ return seq;
+
+ return NULL;
+}
+
+static void *cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ (*pos)++;
+
+ return cpu_seq_start(seq, pos);
+}
+
+static void cpu_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static const struct seq_operations cpu_seq_ops = {
+ .start = cpu_seq_start,
+ .next = cpu_seq_next,
+ .stop = cpu_seq_stop,
+ .show = cpu_seq_show,
+};
+
+static int cpu_seq_open(struct inode *inode, struct file *file)
+{
+ struct cpu_private *priv = inode->i_private;
+ struct seq_file *seq;
+ int err;
+
+ err = seq_open(file, &cpu_seq_ops);
+ if (!err) {
+ seq = file->private_data;
+ seq->private = priv;
+ }
+
+ return err;
+}
+
+static int write_msr(struct cpu_private *priv, u64 val)
+{
+ u32 low, high;
+
+ high = (val >> 32) & 0xffffffff;
+ low = val & 0xffffffff;
+
+ if (!wrmsr_safe_on_cpu(priv->cpu, priv->reg, low, high))
+ return 0;
+
+ return -EPERM;
+}
+
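+/*
+ * Worked example: the string "0x1122334455667788" parses via
+ * strict_strtoull() to a u64 that write_msr() above splits into
+ * high = 0x11223344 and low = 0x55667788 for wrmsr_safe_on_cpu().
+ */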
+static int write_cpu_register(struct cpu_private *priv, const char *buf)
+{
+ int ret = -EPERM;
+ u64 val;
+
+ ret = strict_strtoull(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ /* Supporting only MSRs */
+ if (priv->type < CPU_TSS_BIT)
+ return write_msr(priv, val);
+
+ return ret;
+}
+
+static ssize_t cpu_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *off)
+{
+ struct seq_file *seq = file->private_data;
+ struct cpu_private *priv = seq->private;
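+	/* 19 bytes: "0x" + 16 hex digits + terminating NUL. */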
+ char buf[19];
+
+ if ((priv == NULL) || (count >= sizeof(buf)))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, count))
+ return -EFAULT;
+
+ buf[count] = 0;
+
+ if ((cpu_base[priv->type].write) && (cpu_file[priv->file].write))
+ if (!write_cpu_register(priv, buf))
+ return count;
+
+ return -EACCES;
+}
+
+static const struct file_operations cpu_fops = {
+ .owner = THIS_MODULE,
+ .open = cpu_seq_open,
+ .read = seq_read,
+ .write = cpu_write,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
+ unsigned file, struct dentry *dentry)
+{
+ struct cpu_private *priv = NULL;
+
+	/* Already initialized */
+ if (file == CPU_INDEX_BIT)
+ if (per_cpu(cpu_arr[type].init, cpu))
+ return 0;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (priv == NULL)
+ return -ENOMEM;
+
+ priv->cpu = cpu;
+ priv->type = type;
+ priv->reg = reg;
+ priv->file = file;
+ mutex_lock(&cpu_debug_lock);
+ per_cpu(priv_arr[type], cpu) = priv;
+ per_cpu(cpu_priv_count, cpu)++;
+ mutex_unlock(&cpu_debug_lock);
+
+ if (file)
+ debugfs_create_file(cpu_file[file].name, S_IRUGO,
+ dentry, (void *)priv, &cpu_fops);
+ else {
+ debugfs_create_file(cpu_base[type].name, S_IRUGO,
+ per_cpu(cpu_arr[type].dentry, cpu),
+ (void *)priv, &cpu_fops);
+ mutex_lock(&cpu_debug_lock);
+ per_cpu(cpu_arr[type].init, cpu) = 1;
+ mutex_unlock(&cpu_debug_lock);
+ }
+
+ return 0;
+}
+
+static int cpu_init_regfiles(unsigned cpu, unsigned int type, unsigned reg,
+ struct dentry *dentry)
+{
+ unsigned file;
+ int err = 0;
+
+ for (file = 0; file < ARRAY_SIZE(cpu_file); file++) {
+ err = cpu_create_file(cpu, type, reg, file, dentry);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+static int cpu_init_msr(unsigned cpu, unsigned type, struct dentry *dentry)
+{
+ struct dentry *cpu_dentry = NULL;
+ unsigned reg, reg_min, reg_max;
+ int i, err = 0;
+ char reg_dir[12];
+ u32 low, high;
+
+ for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) {
+		if (!get_cpu_range(cpu, &reg_min, &reg_max, i,
+ cpu_base[type].flag))
+ continue;
+
+ for (reg = reg_min; reg <= reg_max; reg++) {
+ if (rdmsr_safe_on_cpu(cpu, reg, &low, &high))
+ continue;
+
+ sprintf(reg_dir, "0x%x", reg);
+ cpu_dentry = debugfs_create_dir(reg_dir, dentry);
+ err = cpu_init_regfiles(cpu, type, reg, cpu_dentry);
+ if (err)
+ return err;
+ }
+ }
+
+ return err;
+}
+
+static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
+{
+ struct dentry *cpu_dentry = NULL;
+ unsigned type;
+ int err = 0;
+
+ for (type = 0; type < ARRAY_SIZE(cpu_base) - 1; type++) {
+ if (!is_typeflag_valid(cpu, cpu_base[type].flag))
+ continue;
+ cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
+ per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;
+
+ if (type < CPU_TSS_BIT)
+ err = cpu_init_msr(cpu, type, cpu_dentry);
+ else
+ err = cpu_create_file(cpu, type, 0, CPU_INDEX_BIT,
+ cpu_dentry);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+static int cpu_init_cpu(void)
+{
+ struct dentry *cpu_dentry = NULL;
+ struct cpuinfo_x86 *cpui;
+ char cpu_dir[12];
+ unsigned cpu;
+ int err = 0;
+
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+ cpui = &cpu_data(cpu);
+ if (!cpu_has(cpui, X86_FEATURE_MSR))
+ continue;
+
+ sprintf(cpu_dir, "cpu%d", cpu);
+ cpu_dentry = debugfs_create_dir(cpu_dir, cpu_debugfs_dir);
+ err = cpu_init_allreg(cpu, cpu_dentry);
+
+ pr_info("cpu%d(%d) debug files %d\n",
+ cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
+ if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
+ pr_err("Register files count %d exceeds limit %d\n",
+ per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
+ per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
+ err = -ENFILE;
+ }
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+static int __init cpu_debug_init(void)
+{
+ cpu_debugfs_dir = debugfs_create_dir("cpu", arch_debugfs_dir);
+
+ return cpu_init_cpu();
+}
+
+static void __exit cpu_debug_exit(void)
+{
+ int i, cpu;
+
+ if (cpu_debugfs_dir)
+ debugfs_remove_recursive(cpu_debugfs_dir);
+
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+ for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
+ kfree(per_cpu(priv_arr[i], cpu));
+}
+
+module_init(cpu_debug_init);
+module_exit(cpu_debug_exit);
+
+MODULE_AUTHOR("Jaswinder Singh Rajput");
+MODULE_DESCRIPTION("CPU Debug module");
+MODULE_LICENSE("GPL");
kfree(data->powernow_table);
kfree(data);
- per_cpu(powernow_data, pol->cpu) = NULL;
return 0;
}
int err;
if (!data)
- return 0;
+ return -EINVAL;
smp_call_function_single(cpu, query_values_on_cpu, &err, true);
if (err)
if (c->x86_power & (1 << 8)) {
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
+ set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
sched_clock_stable = 1;
}
{ 0xd1, LVL_3, 1024 }, /* 4-way set assoc, 64 byte line size */
{ 0xd2, LVL_3, 2048 }, /* 4-way set assoc, 64 byte line size */
{ 0xd6, LVL_3, 1024 }, /* 8-way set assoc, 64 byte line size */
- { 0xd7, LVL_3, 2048 }, /* 8-way set assoc, 64 byte line size */
+ { 0xd7, LVL_3, 2038 }, /* 8-way set assoc, 64 byte line size */
{ 0xd8, LVL_3, 4096 }, /* 12-way set assoc, 64 byte line size */
{ 0xdc, LVL_3, 2048 }, /* 12-way set assoc, 64 byte line size */
{ 0xdd, LVL_3, 4096 }, /* 12-way set assoc, 64 byte line size */
{ 0xe2, LVL_3, 2048 }, /* 16-way set assoc, 64 byte line size */
{ 0xe3, LVL_3, 4096 }, /* 16-way set assoc, 64 byte line size */
{ 0xe4, LVL_3, 8192 }, /* 16-way set assoc, 64 byte line size */
- { 0xea, LVL_3, 12288 }, /* 24-way set assoc, 64 byte line size */
- { 0xeb, LVL_3, 18432 }, /* 24-way set assoc, 64 byte line size */
- { 0xec, LVL_3, 24576 }, /* 24-way set assoc, 64 byte line size */
{ 0x00, 0, 0}
};
struct timer_list *t = &__get_cpu_var(mce_timer);
int *n = &__get_cpu_var(mce_next_interval);
- setup_timer(t, mcheck_timer, smp_processor_id());
-
if (mce_ignore_ce)
return;
*n = check_interval * HZ;
if (!*n)
return;
+ setup_timer(t, mcheck_timer, smp_processor_id());
t->expires = round_jiffies(jiffies + *n);
add_timer_on(t, smp_processor_id());
}
break;
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
- if (!mce_ignore_ce && check_interval) {
- t->expires = round_jiffies(jiffies +
+ t->expires = round_jiffies(jiffies +
__get_cpu_var(mce_next_interval));
- add_timer_on(t, cpu);
- }
+ add_timer_on(t, cpu);
smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
break;
case CPU_POST_DEAD:
static atomic_t therm_throt_en = ATOMIC_INIT(0);
-static u32 lvtthmr_init __read_mostly;
-
#ifdef CONFIG_SYSFS
#define define_therm_throt_sysdev_one_ro(_name) \
static SYSDEV_ATTR(_name, 0444, therm_throt_sysdev_show_##_name, NULL)
ack_APIC_irq();
}
-void __init mcheck_intel_therm_init(void)
-{
- /*
- * This function is only called on boot CPU. Save the init thermal
- * LVT value on BSP and use that value to restore APs' thermal LVT
- * entry BIOS programmed later
- */
- if (cpu_has(&boot_cpu_data, X86_FEATURE_ACPI) &&
- cpu_has(&boot_cpu_data, X86_FEATURE_ACC))
- lvtthmr_init = apic_read(APIC_LVTTHMR);
-}
-
void intel_init_thermal(struct cpuinfo_x86 *c)
{
unsigned int cpu = smp_processor_id();
int tm2 = 0;
u32 l, h;
- /* Thermal monitoring depends on APIC, ACPI and clock modulation */
- if (!cpu_has_apic || !cpu_has(c, X86_FEATURE_ACPI) ||
- !cpu_has(c, X86_FEATURE_ACC))
+	/* Thermal monitoring depends on ACPI and clock modulation */
+ if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
return;
/*
* since it might be delivered via SMI already:
*/
rdmsr(MSR_IA32_MISC_ENABLE, l, h);
-
- /*
- * The initial value of thermal LVT entries on all APs always reads
- * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI
- * sequence to them and LVT registers are reset to 0s except for
- * the mask bits which are set to 1s when APs receive INIT IPI.
- * Always restore the value that BIOS has programmed on AP based on
- * BSP's info we saved since BIOS is always setting the same value
- * for all threads/cores
- */
- apic_write(APIC_LVTTHMR, lvtthmr_init);
-
- h = lvtthmr_init;
-
+ h = apic_read(APIC_LVTTHMR);
if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
printk(KERN_DEBUG
"CPU%d: Thermal monitoring handled by SMI\n", cpu);
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 &&
- boot_cpu_data.x86 != 16 && boot_cpu_data.x86 != 17)
+ boot_cpu_data.x86 != 16)
return;
wd_ops = &k7_wd_ops;
break;
int i, err = 0;
i = 0;
- if (__register_chrdev(CPUID_MAJOR, 0, NR_CPUS,
- "cpu/cpuid", &cpuid_fops)) {
+ if (register_chrdev(CPUID_MAJOR, "cpu/cpuid", &cpuid_fops)) {
printk(KERN_ERR "cpuid: unable to get major %d for cpuid\n",
CPUID_MAJOR);
err = -EBUSY;
}
class_destroy(cpuid_class);
out_chrdev:
- __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
+ unregister_chrdev(CPUID_MAJOR, "cpu/cpuid");
out:
return err;
}
for_each_online_cpu(cpu)
cpuid_device_destroy(cpu);
class_destroy(cpuid_class);
- __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
+ unregister_chrdev(CPUID_MAJOR, "cpu/cpuid");
unregister_hotcpu_notifier(&cpuid_class_cpu_notifier);
}
* HPET address is set in acpi/boot.c, when an ACPI entry exists
*/
unsigned long hpet_address;
-u8 hpet_msi_disable;
-
#ifdef CONFIG_PCI_MSI
static unsigned long hpet_num_timers;
#endif
unsigned int num_timers_used = 0;
int i;
- if (hpet_msi_disable)
- return;
-
id = hpet_readl(HPET_ID);
num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
hpet_reserve_platform_timers(hpet_readl(HPET_ID));
hpet_print_config();
- if (hpet_msi_disable)
- return 0;
-
for_each_online_cpu(cpu) {
hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
}
int i, err = 0;
i = 0;
- if (__register_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr", &msr_fops)) {
+ if (register_chrdev(MSR_MAJOR, "cpu/msr", &msr_fops)) {
printk(KERN_ERR "msr: unable to get major %d for msr\n",
MSR_MAJOR);
err = -EBUSY;
msr_device_destroy(i);
class_destroy(msr_class);
out_chrdev:
- __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
+ unregister_chrdev(MSR_MAJOR, "cpu/msr");
out:
return err;
}
for_each_online_cpu(cpu)
msr_device_destroy(cpu);
class_destroy(msr_class);
- __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
+ unregister_chrdev(MSR_MAJOR, "cpu/msr");
unregister_hotcpu_notifier(&msr_class_cpu_notifier);
}
pdev = to_pci_dev(dev);
- /* search up the device tree for an iommu */
pbus = pdev->bus;
- do {
- tbl = pci_iommu(pbus);
- if (tbl && tbl->it_busno == pbus->number)
- break;
- tbl = NULL;
+
+ /* is the device behind a bridge? Look for the root bus */
+ while (pbus->parent)
pbus = pbus->parent;
- } while (pbus);
+
+ tbl = pci_iommu(pbus);
BUG_ON(tbl && (tbl->it_busno != pbus->number));
if (!strncmp(p, "allowdac", 8))
forbid_dac = 0;
if (!strncmp(p, "nodac", 5))
- forbid_dac = 1;
+ forbid_dac = -1;
if (!strncmp(p, "usedac", 6)) {
forbid_dac = -1;
return 1;
#endif
if (isdigit(*p) && get_option(&p, &arg))
iommu_size = arg;
- if (!strncmp(p, "fullflush", 9))
+ if (!strncmp(p, "fullflush", 8))
iommu_fullflush = 1;
if (!strncmp(p, "nofullflush", 11))
iommu_fullflush = 0;
{
struct task_struct *tsk = current;
+#ifdef CONFIG_X86_64
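+	/*
+	 * TIF_ABI_PENDING is set at exec time by SET_PERSONALITY() or
+	 * COMPAT_SET_PERSONALITY() when the new image's ABI differs from
+	 * the current one; consume it here and toggle TIF_IA32 to match.
+	 */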
+ if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
+ clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
+ if (test_tsk_thread_flag(tsk, TIF_IA32)) {
+ clear_tsk_thread_flag(tsk, TIF_IA32);
+ } else {
+ set_tsk_thread_flag(tsk, TIF_IA32);
+ current_thread_info()->status |= TS_COMPAT;
+ }
+ }
+#endif
+
clear_tsk_thread_flag(tsk, TIF_DEBUG);
tsk->thread.debugreg0 = 0;
return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}
-void set_personality_ia32(void)
-{
- /* inherit personality from parent */
-
- /* Make sure to be in 32bit mode */
- set_thread_flag(TIF_IA32);
-
- /* Prepare the first "return" to user space */
- current_thread_info()->status |= TS_COMPAT;
-}
-
unsigned long get_wchan(struct task_struct *p)
{
unsigned long stack;
{
if (kbuf) {
unsigned long *k = kbuf;
- while (count >= sizeof(*k)) {
+ while (count > 0) {
*k++ = getreg(target, pos);
count -= sizeof(*k);
pos += sizeof(*k);
}
} else {
unsigned long __user *u = ubuf;
- while (count >= sizeof(*u)) {
+ while (count > 0) {
if (__put_user(getreg(target, pos), u++))
return -EFAULT;
count -= sizeof(*u);
int ret = 0;
if (kbuf) {
const unsigned long *k = kbuf;
- while (count >= sizeof(*k) && !ret) {
+ while (count > 0 && !ret) {
ret = putreg(target, pos, *k++);
count -= sizeof(*k);
pos += sizeof(*k);
}
} else {
const unsigned long __user *u = ubuf;
- while (count >= sizeof(*u) && !ret) {
+ while (count > 0 && !ret) {
unsigned long word;
ret = __get_user(word, u++);
if (ret)
{
if (kbuf) {
compat_ulong_t *k = kbuf;
- while (count >= sizeof(*k)) {
+ while (count > 0) {
getreg32(target, pos, k++);
count -= sizeof(*k);
pos += sizeof(*k);
}
} else {
compat_ulong_t __user *u = ubuf;
- while (count >= sizeof(*u)) {
+ while (count > 0) {
compat_ulong_t word;
getreg32(target, pos, &word);
if (__put_user(word, u++))
int ret = 0;
if (kbuf) {
const compat_ulong_t *k = kbuf;
- while (count >= sizeof(*k) && !ret) {
+ while (count > 0 && !ret) {
ret = putreg32(target, pos, *k++);
count -= sizeof(*k);
pos += sizeof(*k);
}
} else {
const compat_ulong_t __user *u = ubuf;
- while (count >= sizeof(*u) && !ret) {
+ while (count > 0 && !ret) {
compat_ulong_t word;
ret = __get_user(word, u++);
if (ret)
break;
}
}
-
-/*
- * HPET MSI on some boards (ATI SB700/SB800) has side effect on
- * floppy DMA. Disable HPET MSI on such platforms.
- */
-static void force_disable_hpet_msi(struct pci_dev *unused)
-{
- hpet_msi_disable = 1;
-}
-
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
- force_disable_hpet_msi);
-
#endif
#if defined(CONFIG_PCI) && defined(CONFIG_NUMA)
DMI_MATCH(DMI_BOARD_NAME, "0T656F"),
},
},
- { /* Handle problems with rebooting on Dell OptiPlex 760 with 0G919G*/
- .callback = set_bios_reboot,
- .ident = "Dell OptiPlex 760",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 760"),
- DMI_MATCH(DMI_BOARD_NAME, "0G919G"),
- },
- },
{ /* Handle problems with rebooting on Dell 2400's */
.callback = set_bios_reboot,
.ident = "Dell PowerEdge 2400",
DMI_MATCH(DMI_PRODUCT_NAME, "SBC-FITPC2"),
},
},
- { /* Handle problems with rebooting on ASUS P4S800 */
- .callback = set_bios_reboot,
- .ident = "ASUS P4S800",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
- DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
- },
- },
{ }
};
#ifdef CONFIG_X86_64
#include <asm/numa_64.h>
#endif
-#include <asm/mce.h>
/*
* end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix/MSC"),
},
},
+ {
/*
- * AMI BIOS with low memory corruption was found on Intel DG45ID and
- * DG45FC boards.
- * It has a different DMI_BIOS_VENDOR = "Intel Corp.", for now we will
+ * AMI BIOS with low memory corruption was found on Intel DG45ID board.
+ * It has a different DMI_BIOS_VENDOR = "Intel Corp.", for now we will
 * match only DMI_BOARD_NAME and see if there are more bad products
* with this vendor.
*/
- {
.callback = dmi_low_memory_corruption,
.ident = "AMI BIOS",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "DG45ID"),
},
},
- {
- .callback = dmi_low_memory_corruption,
- .ident = "AMI BIOS",
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "DG45FC"),
- },
- },
#endif
{}
};
#endif
#endif
x86_init.oem.banner();
-
- mcheck_intel_therm_init();
}
#ifdef CONFIG_X86_32
#include <asm/syscalls.h>
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ int error = -EBADF;
+ struct file *file = NULL;
+ struct mm_struct *mm = current->mm;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
+ down_write(&mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&mm->mmap_sem);
+
+ if (file)
+ fput(file);
+out:
+ return error;
+}
+
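
The sys_mmap2() body above is the classic open-coded per-architecture
pattern: clear the kernel-internal MAP_EXECUTABLE and MAP_DENYWRITE
bits, pin the file with fget(), call do_mmap_pgoff() under mmap_sem,
then drop the reference. Its last argument is a page number, not a
byte offset, which is why the C library divides by the page size
before issuing mmap2; from user space the path is reached through
ordinary mmap(). A minimal sketch, assuming /etc/hostname exists and
is non-empty:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
        char *p;
        int fd = open("/etc/hostname", O_RDONLY);

        if (fd < 0)
                return 1;
        /* the C library converts this byte offset to pages for mmap2 */
        p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
        if (p != MAP_FAILED) {
                printf("%.64s", p);
                munmap(p, 4096);
        }
        close(fd);
        return 0;
}
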
/*
* Perform the select(nd, in, out, ex, tv) and mmap() system
* calls. Linux/i386 didn't use to be able to handle more than
if (a.offset & ~PAGE_MASK)
goto out;
- err = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags,
+ err = sys_mmap2(a.addr, a.len, a.prot, a.flags,
a.fd, a.offset >> PAGE_SHIFT);
out:
return err;
unsigned long, fd, unsigned long, off)
{
long error;
+ struct file *file;
+
error = -EINVAL;
if (off & ~PAGE_MASK)
goto out;
- error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
+ error = -EBADF;
+ file = NULL;
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, off >> PAGE_SHIFT);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
out:
return error;
}
.long sys_ni_syscall /* reserved for streams2 */
.long ptregs_vfork /* 190 */
.long sys_getrlimit
- .long sys_mmap_pgoff
+ .long sys_mmap2
.long sys_truncate64
.long sys_ftruncate64
.long sys_stat64 /* 195 */
*/
apicid = blade_to_first_apicid(blade);
pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
- uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
+ if ((pa & 0xff) != UV_BAU_MESSAGE) {
+ uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
((apicid << 32) | UV_BAU_MESSAGE));
+ }
return 0;
}
{
if (!tsc_unstable) {
tsc_unstable = 1;
- sched_clock_stable = 0;
printk(KERN_INFO "Marking TSC unstable due to %s\n", reason);
/* Change only the rating, when not registered */
if (clocksource_tsc.mult)
{
int rc = 0;
- /* x86 instructions are limited to 15 bytes. */
- if (eip + size - ctxt->decode.eip_orig > 15)
- return X86EMUL_UNHANDLEABLE;
eip += ctxt->cs_base;
while (size--) {
rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
/* Shadow copy of register state. Committed on successful emulation. */
memset(c, 0, sizeof(struct decode_cache));
- c->eip = c->eip_orig = kvm_rip_read(ctxt->vcpu);
+ c->eip = kvm_rip_read(ctxt->vcpu);
ctxt->cs_base = seg_base(ctxt, VCPU_SREG_CS);
memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
return -EOPNOTSUPP;
addr &= KVM_PIT_CHANNEL_MASK;
- if (addr == 3)
- return 0;
-
s = &pit_state->channels[addr];
mutex_lock(&pit_state->lock);
if (unlikely(!apic_enabled(apic)))
break;
- if (trig_mode) {
- apic_debug("level trig mode for vector %d", vector);
- apic_set_vector(vector, apic->regs + APIC_TMR);
- } else
- apic_clear_vector(vector, apic->regs + APIC_TMR);
-
result = !apic_test_and_set_irr(vector, apic);
trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
trig_mode, vector, !result);
break;
}
+ if (trig_mode) {
+ apic_debug("level trig mode for vector %d", vector);
+ apic_set_vector(vector, apic->regs + APIC_TMR);
+ } else
+ apic_clear_vector(vector, apic->regs + APIC_TMR);
kvm_vcpu_kick(vcpu);
break;
hrtimer_cancel(&apic->lapic_timer.timer);
update_divide_count(apic);
start_apic_timer(apic);
- apic->irr_pending = true;
}
void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
addr = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(addr))
- return PT_PAGE_TABLE_LEVEL;
+ return page_size;
down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, addr);
if (host_level == PT_PAGE_TABLE_LEVEL)
return host_level;
- for (level = PT_DIRECTORY_LEVEL; level <= host_level; ++level)
+ for (level = PT_DIRECTORY_LEVEL; level <= host_level; ++level) {
+
if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
break;
+ }
return level - 1;
}
walker->table_gfn[walker->level - 1] = table_gfn;
walker->pte_gpa[walker->level - 1] = pte_gpa;
- if (kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte)))
- goto not_present;
-
+ kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
trace_kvm_mmu_paging_element(pte, walker->level);
if (!is_present_gpte(pte))
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
struct kvm_shadow_walk_iterator iterator;
+ pt_element_t gpte;
+ gpa_t pte_gpa = -1;
int level;
u64 *sptep;
int need_flush = 0;
if (level == PT_PAGE_TABLE_LEVEL ||
((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
+ struct kvm_mmu_page *sp = page_header(__pa(sptep));
+
+ pte_gpa = (sp->gfn << PAGE_SHIFT);
+ pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
if (is_shadow_present_pte(*sptep)) {
rmap_remove(vcpu->kvm, sptep);
if (need_flush)
kvm_flush_remote_tlbs(vcpu->kvm);
spin_unlock(&vcpu->kvm->mmu_lock);
+
+ if (pte_gpa == -1)
+ return;
+ if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
+ sizeof(pt_element_t)))
+ return;
+ if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
+ if (mmu_topup_memory_caches(vcpu))
+ return;
+ kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
+ sizeof(pt_element_t), 0);
+ }
}
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
* and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
*
* This list is modified at module load time to reflect the
- * capabilities of the host cpu. This capabilities test skips MSRs that are
- * kvm-specific. Those are put in the beginning of the list.
+ * capabilities of the host cpu.
*/
-
-#define KVM_SAVE_MSRS_BEGIN 2
static u32 msrs_to_save[] = {
- MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
MSR_K6_STAR,
#ifdef CONFIG_X86_64
MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
- MSR_IA32_TSC, MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
+ MSR_IA32_TSC, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
+ MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};
static unsigned num_msrs_to_save;
{
static int version;
struct pvclock_wall_clock wc;
- struct timespec boot;
+ struct timespec now, sys, boot;
if (!wall_clock)
return;
* wall clock specified here. guest system time equals host
* system time for us, thus we must fill in host boot time here.
*/
- getboottime(&boot);
+ now = current_kernel_time();
+ ktime_get_ts(&sys);
+ boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));
wc.sec = boot.tv_sec;
wc.nsec = boot.tv_nsec;
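
The replacement computes the guest-visible boot time as wall-clock time
minus monotonic time. The same arithmetic can be reproduced from user
space; note that CLOCK_MONOTONIC, like ktime_get_ts() here, does not
advance while the host is suspended, which is exactly the inaccuracy
the getboottime()/monotonic_to_bootbased() variant on the other side of
this hunk addresses. A sketch:

#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec now, sys;

        clock_gettime(CLOCK_REALTIME, &now);    /* wall clock */
        clock_gettime(CLOCK_MONOTONIC, &sys);   /* since boot, sans suspend */
        printf("booted at ~%ld (unix time)\n", now.tv_sec - sys.tv_sec);
        return 0;
}
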
local_irq_save(flags);
kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
ktime_get_ts(&ts);
- monotonic_to_bootbased(&ts);
local_irq_restore(flags);
/* With all the info we got, fill in the values */
vcpu->hv_clock.system_time = ts.tv_nsec +
- (NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset;
-
+ (NSEC_PER_SEC * (u64)ts.tv_sec);
/*
* The interface expects us to write an even number signaling that the
* update is finished. Since the guest won't see the intermediate
case KVM_CAP_PIT2:
case KVM_CAP_PIT_STATE2:
case KVM_CAP_SET_IDENTITY_MAP_ADDR:
- case KVM_CAP_ADJUST_CLOCK:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
r = 0;
break;
}
- case KVM_SET_CLOCK: {
- struct timespec now;
- struct kvm_clock_data user_ns;
- u64 now_ns;
- s64 delta;
-
- r = -EFAULT;
- if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
- goto out;
-
- r = -EINVAL;
- if (user_ns.flags)
- goto out;
-
- r = 0;
- ktime_get_ts(&now);
- now_ns = timespec_to_ns(&now);
- delta = user_ns.clock - now_ns;
- kvm->arch.kvmclock_offset = delta;
- break;
- }
- case KVM_GET_CLOCK: {
- struct timespec now;
- struct kvm_clock_data user_ns;
- u64 now_ns;
-
- ktime_get_ts(&now);
- now_ns = timespec_to_ns(&now);
- user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
- user_ns.flags = 0;
-
- r = -EFAULT;
- if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
- goto out;
- r = 0;
- break;
- }
-
default:
;
}
u32 dummy[2];
unsigned i, j;
- /* skip the first msrs in the list. KVM-specific */
- for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
+ for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
continue;
if (j < i)
GFP_KERNEL);
if (!vcpu->arch.mce_banks) {
r = -ENOMEM;
- goto fail_free_lapic;
+ goto fail_mmu_destroy;
}
vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
return 0;
-fail_free_lapic:
- kvm_free_lapic(vcpu);
+
fail_mmu_destroy:
kvm_mmu_destroy(vcpu);
fail_free_pio_data:
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
- kfree(vcpu->arch.mce_banks);
kvm_free_lapic(vcpu);
down_read(&vcpu->kvm->slots_lock);
kvm_mmu_destroy(vcpu);
# Makefile for x86 specific library files.
#
-obj-$(CONFIG_SMP) += msr-smp.o
+obj-$(CONFIG_SMP) := msr.o
lib-y := delay.o
lib-y += thunk_$(BITS).o
lib-y += usercopy_$(BITS).o getuser.o putuser.o
lib-y += memcpy_$(BITS).o
-obj-y += msr.o msr-reg.o msr-reg-export.o
+obj-y += msr-reg.o msr-reg-export.o
ifeq ($(CONFIG_X86_32),y)
obj-y += atomic64_32.o
+++ /dev/null
-#include <linux/module.h>
-#include <linux/preempt.h>
-#include <linux/smp.h>
-#include <asm/msr.h>
-
-static void __rdmsr_on_cpu(void *info)
-{
- struct msr_info *rv = info;
- struct msr *reg;
- int this_cpu = raw_smp_processor_id();
-
- if (rv->msrs)
- reg = per_cpu_ptr(rv->msrs, this_cpu);
- else
- reg = &rv->reg;
-
- rdmsr(rv->msr_no, reg->l, reg->h);
-}
-
-static void __wrmsr_on_cpu(void *info)
-{
- struct msr_info *rv = info;
- struct msr *reg;
- int this_cpu = raw_smp_processor_id();
-
- if (rv->msrs)
- reg = per_cpu_ptr(rv->msrs, this_cpu);
- else
- reg = &rv->reg;
-
- wrmsr(rv->msr_no, reg->l, reg->h);
-}
-
-int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
-{
- int err;
- struct msr_info rv;
-
- memset(&rv, 0, sizeof(rv));
-
- rv.msr_no = msr_no;
- err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
- *l = rv.reg.l;
- *h = rv.reg.h;
-
- return err;
-}
-EXPORT_SYMBOL(rdmsr_on_cpu);
-
-int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
-{
- int err;
- struct msr_info rv;
-
- memset(&rv, 0, sizeof(rv));
-
- rv.msr_no = msr_no;
- rv.reg.l = l;
- rv.reg.h = h;
- err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
-
- return err;
-}
-EXPORT_SYMBOL(wrmsr_on_cpu);
-
-static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
- struct msr *msrs,
- void (*msr_func) (void *info))
-{
- struct msr_info rv;
- int this_cpu;
-
- memset(&rv, 0, sizeof(rv));
-
- rv.msrs = msrs;
- rv.msr_no = msr_no;
-
- this_cpu = get_cpu();
-
- if (cpumask_test_cpu(this_cpu, mask))
- msr_func(&rv);
-
- smp_call_function_many(mask, msr_func, &rv, 1);
- put_cpu();
-}
-
-/* rdmsr on a bunch of CPUs
- *
- * @mask: which CPUs
- * @msr_no: which MSR
- * @msrs: array of MSR values
- *
- */
-void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
-{
- __rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
-}
-EXPORT_SYMBOL(rdmsr_on_cpus);
-
-/*
- * wrmsr on a bunch of CPUs
- *
- * @mask: which CPUs
- * @msr_no: which MSR
- * @msrs: array of MSR values
- *
- */
-void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
-{
- __rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
-}
-EXPORT_SYMBOL(wrmsr_on_cpus);
-
-/* These "safe" variants are slower and should be used when the target MSR
- may not actually exist. */
-static void __rdmsr_safe_on_cpu(void *info)
-{
- struct msr_info *rv = info;
-
- rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
-}
-
-static void __wrmsr_safe_on_cpu(void *info)
-{
- struct msr_info *rv = info;
-
- rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
-}
-
-int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
-{
- int err;
- struct msr_info rv;
-
- memset(&rv, 0, sizeof(rv));
-
- rv.msr_no = msr_no;
- err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
- *l = rv.reg.l;
- *h = rv.reg.h;
-
- return err ? err : rv.err;
-}
-EXPORT_SYMBOL(rdmsr_safe_on_cpu);
-
-int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
-{
- int err;
- struct msr_info rv;
-
- memset(&rv, 0, sizeof(rv));
-
- rv.msr_no = msr_no;
- rv.reg.l = l;
- rv.reg.h = h;
- err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
-
- return err ? err : rv.err;
-}
-EXPORT_SYMBOL(wrmsr_safe_on_cpu);
-
-/*
- * These variants are significantly slower, but allows control over
- * the entire 32-bit GPR set.
- */
-static void __rdmsr_safe_regs_on_cpu(void *info)
-{
- struct msr_regs_info *rv = info;
-
- rv->err = rdmsr_safe_regs(rv->regs);
-}
-
-static void __wrmsr_safe_regs_on_cpu(void *info)
-{
- struct msr_regs_info *rv = info;
-
- rv->err = wrmsr_safe_regs(rv->regs);
-}
-
-int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
-{
- int err;
- struct msr_regs_info rv;
-
- rv.regs = regs;
- rv.err = -EIO;
- err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
-
- return err ? err : rv.err;
-}
-EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
-
-int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
-{
- int err;
- struct msr_regs_info rv;
-
- rv.regs = regs;
- rv.err = -EIO;
- err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
-
- return err ? err : rv.err;
-}
-EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
#include <linux/module.h>
#include <linux/preempt.h>
+#include <linux/smp.h>
#include <asm/msr.h>
-struct msr *msrs_alloc(void)
+struct msr_info {
+ u32 msr_no;
+ struct msr reg;
+ struct msr *msrs;
+ int off;
+ int err;
+};
+
+static void __rdmsr_on_cpu(void *info)
+{
+ struct msr_info *rv = info;
+ struct msr *reg;
+ int this_cpu = raw_smp_processor_id();
+
+ if (rv->msrs)
+ reg = &rv->msrs[this_cpu - rv->off];
+ else
+ reg = &rv->reg;
+
+ rdmsr(rv->msr_no, reg->l, reg->h);
+}
+
+static void __wrmsr_on_cpu(void *info)
+{
+ struct msr_info *rv = info;
+ struct msr *reg;
+ int this_cpu = raw_smp_processor_id();
+
+ if (rv->msrs)
+ reg = &rv->msrs[this_cpu - rv->off];
+ else
+ reg = &rv->reg;
+
+ wrmsr(rv->msr_no, reg->l, reg->h);
+}
+
+int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+{
+ int err;
+ struct msr_info rv;
+
+ memset(&rv, 0, sizeof(rv));
+
+ rv.msr_no = msr_no;
+ err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
+ *l = rv.reg.l;
+ *h = rv.reg.h;
+
+ return err;
+}
+EXPORT_SYMBOL(rdmsr_on_cpu);
+
+int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+ int err;
+ struct msr_info rv;
+
+ memset(&rv, 0, sizeof(rv));
+
+ rv.msr_no = msr_no;
+ rv.reg.l = l;
+ rv.reg.h = h;
+ err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
+
+ return err;
+}
+EXPORT_SYMBOL(wrmsr_on_cpu);
+
+/* rdmsr on a bunch of CPUs
+ *
+ * @mask: which CPUs
+ * @msr_no: which MSR
+ * @msrs: array of MSR values
+ *
+ */
+void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
+{
+ struct msr_info rv;
+ int this_cpu;
+
+ memset(&rv, 0, sizeof(rv));
+
+ rv.off = cpumask_first(mask);
+ rv.msrs = msrs;
+ rv.msr_no = msr_no;
+
+ this_cpu = get_cpu();
+
+ if (cpumask_test_cpu(this_cpu, mask))
+ __rdmsr_on_cpu(&rv);
+
+ smp_call_function_many(mask, __rdmsr_on_cpu, &rv, 1);
+ put_cpu();
+}
+EXPORT_SYMBOL(rdmsr_on_cpus);
+
+/*
+ * wrmsr on a bunch of CPUs
+ *
+ * @mask: which CPUs
+ * @msr_no: which MSR
+ * @msrs: array of MSR values
+ *
+ */
+void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
+{
+ struct msr_info rv;
+ int this_cpu;
+
+ memset(&rv, 0, sizeof(rv));
+
+ rv.off = cpumask_first(mask);
+ rv.msrs = msrs;
+ rv.msr_no = msr_no;
+
+ this_cpu = get_cpu();
+
+ if (cpumask_test_cpu(this_cpu, mask))
+ __wrmsr_on_cpu(&rv);
+
+ smp_call_function_many(mask, __wrmsr_on_cpu, &rv, 1);
+ put_cpu();
+}
+EXPORT_SYMBOL(wrmsr_on_cpus);
+
+/* These "safe" variants are slower and should be used when the target MSR
+ may not actually exist. */
+static void __rdmsr_safe_on_cpu(void *info)
+{
+ struct msr_info *rv = info;
+
+ rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
+}
+
+static void __wrmsr_safe_on_cpu(void *info)
+{
+ struct msr_info *rv = info;
+
+ rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
+}
+
+int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
- struct msr *msrs = NULL;
+ int err;
+ struct msr_info rv;
- msrs = alloc_percpu(struct msr);
- if (!msrs) {
- pr_warning("%s: error allocating msrs\n", __func__);
- return NULL;
- }
+ memset(&rv, 0, sizeof(rv));
- return msrs;
+ rv.msr_no = msr_no;
+ err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
+ *l = rv.reg.l;
+ *h = rv.reg.h;
+
+ return err ? err : rv.err;
}
-EXPORT_SYMBOL(msrs_alloc);
+EXPORT_SYMBOL(rdmsr_safe_on_cpu);
-void msrs_free(struct msr *msrs)
+int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
- free_percpu(msrs);
+ int err;
+ struct msr_info rv;
+
+ memset(&rv, 0, sizeof(rv));
+
+ rv.msr_no = msr_no;
+ rv.reg.l = l;
+ rv.reg.h = h;
+ err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
+
+ return err ? err : rv.err;
+}
+EXPORT_SYMBOL(wrmsr_safe_on_cpu);
+
+/*
+ * These variants are significantly slower, but allow control over
+ * the entire 32-bit GPR set.
+ */
+struct msr_regs_info {
+ u32 *regs;
+ int err;
+};
+
+static void __rdmsr_safe_regs_on_cpu(void *info)
+{
+ struct msr_regs_info *rv = info;
+
+ rv->err = rdmsr_safe_regs(rv->regs);
+}
+
+static void __wrmsr_safe_regs_on_cpu(void *info)
+{
+ struct msr_regs_info *rv = info;
+
+ rv->err = wrmsr_safe_regs(rv->regs);
+}
+
+int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
+{
+ int err;
+ struct msr_regs_info rv;
+
+ rv.regs = regs;
+ rv.err = -EIO;
+ err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
+
+ return err ? err : rv.err;
+}
+EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
+
+int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
+{
+ int err;
+ struct msr_regs_info rv;
+
+ rv.regs = regs;
+ rv.err = -EIO;
+ err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
+
+ return err ? err : rv.err;
}
-EXPORT_SYMBOL(msrs_free);
+EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
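
All of the *_on_cpu() helpers above funnel through
smp_call_function_single(), because RDMSR and WRMSR always act on the
CPU that executes them. User space gets the equivalent service from the
msr driver: /dev/cpu/N/msr performs the access on CPU N, with the MSR
number passed as the file offset. A sketch reading
IA32_TIME_STAMP_COUNTER (MSR 0x10) on cpu0, assuming the msr module is
loaded and root privileges:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        uint64_t val;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0)
                return 1;
        /* the offset selects the MSR; reads are always 8 bytes */
        if (pread(fd, &val, sizeof(val), 0x10) == sizeof(val))
                printf("TSC on cpu0: %llu\n", (unsigned long long)val);
        close(fd);
        return 0;
}
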
printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n");
}
- if (changed) {
- node_set(node, cpu_nodes_parsed);
+ if (changed)
printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n",
nd->start, nd->end);
- }
}
/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
/* move to next set */
si += model->num_counters;
- if ((si >= model->num_virt_counters) || (counter_config[si].count == 0))
+ if ((si > model->num_virt_counters) || (counter_config[si].count == 0))
per_cpu(switch_index, cpu) = 0;
else
per_cpu(switch_index, cpu) = si;
case 15: case 23:
*cpu_type = "i386/core_2";
break;
- case 0x2e:
case 26:
spec = &op_arch_perfmon_spec;
*cpu_type = "i386/core_i7";
return -EINVAL;
prot = pgprot_val(vma->vm_page_prot);
-
- /*
- * Return error if pat is not enabled and write_combine is requested.
- * Caller can followup with UC MINUS request and add a WC mtrr if there
- * is a free mtrr slot.
- */
- if (!pat_enabled && write_combine)
- return -EINVAL;
-
if (pat_enabled && write_combine)
prot |= _PAGE_CACHE_WC;
else if (pat_enabled || boot_cpu_data.x86 > 3)
*/
void xen_vcpu_restore(void)
{
- int cpu;
+ if (have_vcpu_info_placement) {
+ int cpu;
- for_each_online_cpu(cpu) {
- bool other_cpu = (cpu != smp_processor_id());
+ for_each_online_cpu(cpu) {
+ bool other_cpu = (cpu != smp_processor_id());
- if (other_cpu &&
- HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
- BUG();
-
- xen_setup_runstate_info(cpu);
+ if (other_cpu &&
+ HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
+ BUG();
- if (have_vcpu_info_placement)
xen_vcpu_setup(cpu);
- if (other_cpu &&
- HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
- BUG();
+ if (other_cpu &&
+ HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
+ BUG();
+ }
+
+ BUG_ON(!have_vcpu_info_placement);
}
}
xen_raw_console_write("about to get started...\n");
- xen_setup_runstate_info(0);
-
/* Start the world */
#ifdef CONFIG_X86_32
i386_start_kernel();
}
/* Build the parallel p2m_top_mfn structures */
-void xen_build_mfn_list_list(void)
+static void __init xen_build_mfn_list_list(void)
{
unsigned pfn, idx;
(unsigned long)task_stack_page(idle) -
KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
- xen_setup_runstate_info(cpu);
xen_setup_timer(cpu);
xen_init_lock_cpu(cpu);
#include <linux/types.h>
-#include <linux/clockchips.h>
#include <xen/interface/xen.h>
#include <xen/grant_table.h>
void xen_post_suspend(int suspend_cancelled)
{
- xen_build_mfn_list_list();
-
xen_setup_shared_info();
if (suspend_cancelled) {
}
-static void xen_vcpu_notify_restore(void *data)
-{
- unsigned long reason = (unsigned long)data;
-
- /* Boot processor notified via generic timekeeping_resume() */
- if ( smp_processor_id() == 0)
- return;
-
- clockevents_notify(reason, NULL);
-}
-
void xen_arch_resume(void)
{
- smp_call_function(xen_vcpu_notify_restore,
- (void *)CLOCK_EVT_NOTIFY_RESUME, 1);
+ /* nothing */
}
return per_cpu(runstate, vcpu).state == RUNSTATE_runnable;
}
-void xen_setup_runstate_info(int cpu)
+static void setup_runstate_info(int cpu)
{
struct vcpu_register_runstate_memory_area area;
name = "<timer kasprintf failed>";
irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
- IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER,
+ IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
name, NULL);
evt = &per_cpu(xen_clock_events, cpu);
evt->cpumask = cpumask_of(cpu);
evt->irq = irq;
+
+ setup_runstate_info(cpu);
}
void xen_teardown_timer(int cpu)
setup_force_cpu_cap(X86_FEATURE_TSC);
- xen_setup_runstate_info(cpu);
xen_setup_timer(cpu);
xen_setup_cpu_clockevents();
}
pushq $__USER32_CS
pushq %rcx
- pushq $0
+ pushq $VGCF_in_syscall
1: jmp hypercall_iret
ENDPATCH(xen_sysret32)
RELOC(xen_sysret32, 1b+1)
ENTRY(xen_sysenter_target)
lea 16(%rsp), %rsp /* strip %rcx, %r11 */
mov $-ENOSYS, %rax
- pushq $0
+ pushq $VGCF_in_syscall
jmp hypercall_iret
ENDPROC(xen_syscall32_target)
ENDPROC(xen_sysenter_target)
void xen_setup_mfn_list_list(void);
void xen_setup_shared_info(void);
-void xen_build_mfn_list_list(void);
void xen_setup_machphys_mapping(void);
pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
void xen_ident_map_ISA(void);
void xen_init_irq_ops(void);
void xen_setup_timer(int cpu);
-void xen_setup_runstate_info(int cpu);
void xen_teardown_timer(int cpu);
cycle_t xen_clocksource_read(void);
void xen_setup_cpu_clockevents(void);
asmlinkage long xtensa_execve(char*, char**, char**, struct pt_regs*);
asmlinkage long xtensa_clone(unsigned long, unsigned long, struct pt_regs*);
asmlinkage long xtensa_pipe(int __user *);
+asmlinkage long xtensa_mmap2(unsigned long, unsigned long, unsigned long,
+ unsigned long, unsigned long, unsigned long);
asmlinkage long xtensa_ptrace(long, long, long, long);
asmlinkage long xtensa_sigreturn(struct pt_regs*);
asmlinkage long xtensa_rt_sigreturn(struct pt_regs*);
/* File Map / Shared Memory Operations */
#define __NR_mmap2 80
-__SYSCALL( 80, sys_mmap_pgoff, 6)
+__SYSCALL( 80, xtensa_mmap2, 6)
#define __NR_munmap 81
__SYSCALL( 81, sys_munmap, 2)
#define __NR_mprotect 82
return error;
}
+
+asmlinkage long xtensa_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ int error = -EBADF;
+ struct file * file = NULL;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+out:
+ return error;
+}
+
asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
{
unsigned long ret;
}
EXPORT_SYMBOL(blk_stack_limits);
-/**
- * bdev_stack_limits - adjust queue limits for stacked drivers
- * @t: the stacking driver limits (top device)
- * @bdev: the component block_device (bottom)
- * @start: first data sector within component device
- *
- * Description:
- * Merges queue limits for a top device and a block_device. Returns
- * 0 if alignment didn't change. Returns -1 if adding the bottom
- * device caused misalignment.
- */
-int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
- sector_t start)
-{
- struct request_queue *bq = bdev_get_queue(bdev);
-
- start += get_start_sect(bdev);
-
- return blk_stack_limits(t, &bq->limits, start << 9);
-}
-EXPORT_SYMBOL(bdev_stack_limits);
-
/**
* disk_stack_limits - adjust queue limits for stacked drivers
* @disk: MD/DM gendisk (top)
EXPORT_SYMBOL(acpi_bus_can_wakeup);
-static void acpi_print_osc_error(acpi_handle handle,
- struct acpi_osc_context *context, char *error)
-{
- struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER};
- int i;
-
- if (ACPI_FAILURE(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer)))
- printk(KERN_DEBUG "%s\n", error);
- else {
- printk(KERN_DEBUG "%s:%s\n", (char *)buffer.pointer, error);
- kfree(buffer.pointer);
- }
- printk(KERN_DEBUG"_OSC request data:");
- for (i = 0; i < context->cap.length; i += sizeof(u32))
- printk("%x ", *((u32 *)(context->cap.pointer + i)));
- printk("\n");
-}
-
-static u8 hex_val(unsigned char c)
-{
- return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
-}
-
-static acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
-{
- int i;
- static int opc_map_to_uuid[16] = {6, 4, 2, 0, 11, 9, 16, 14, 19, 21,
- 24, 26, 28, 30, 32, 34};
-
- if (strlen(str) != 36)
- return AE_BAD_PARAMETER;
- for (i = 0; i < 36; i++) {
- if (i == 8 || i == 13 || i == 18 || i == 23) {
- if (str[i] != '-')
- return AE_BAD_PARAMETER;
- } else if (!isxdigit(str[i]))
- return AE_BAD_PARAMETER;
- }
- for (i = 0; i < 16; i++) {
- uuid[i] = hex_val(str[opc_map_to_uuid[i]]) << 4;
- uuid[i] |= hex_val(str[opc_map_to_uuid[i] + 1]);
- }
- return AE_OK;
-}
-
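
The opc_map_to_uuid table in the helper above encodes the mixed-endian
GUID layout that _OSC expects: the first three fields of the UUID
string are emitted least-significant byte first, the remaining bytes in
string order. A standalone sketch of the mapping, using the _SB UUID
that appears further down:

#include <stdio.h>

int main(void)
{
        static const int map[16] = { 6, 4, 2, 0, 11, 9, 16, 14, 19, 21,
                                     24, 26, 28, 30, 32, 34 };
        const char *u = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48";
        int i;

        for (i = 0; i < 16; i++)        /* prints 6E B0 11 08 27 4A ... */
                printf("%c%c ", u[map[i]], u[map[i] + 1]);
        printf("\n");
        return 0;
}
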
-acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context)
-{
- acpi_status status;
- struct acpi_object_list input;
- union acpi_object in_params[4];
- union acpi_object *out_obj;
- u8 uuid[16];
- u32 errors;
- struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
-
- if (!context)
- return AE_ERROR;
- if (ACPI_FAILURE(acpi_str_to_uuid(context->uuid_str, uuid)))
- return AE_ERROR;
- context->ret.length = ACPI_ALLOCATE_BUFFER;
- context->ret.pointer = NULL;
-
- /* Setting up input parameters */
- input.count = 4;
- input.pointer = in_params;
- in_params[0].type = ACPI_TYPE_BUFFER;
- in_params[0].buffer.length = 16;
- in_params[0].buffer.pointer = uuid;
- in_params[1].type = ACPI_TYPE_INTEGER;
- in_params[1].integer.value = context->rev;
- in_params[2].type = ACPI_TYPE_INTEGER;
- in_params[2].integer.value = context->cap.length/sizeof(u32);
- in_params[3].type = ACPI_TYPE_BUFFER;
- in_params[3].buffer.length = context->cap.length;
- in_params[3].buffer.pointer = context->cap.pointer;
-
- status = acpi_evaluate_object(handle, "_OSC", &input, &output);
- if (ACPI_FAILURE(status))
- return status;
-
- if (!output.length)
- return AE_NULL_OBJECT;
-
- out_obj = output.pointer;
- if (out_obj->type != ACPI_TYPE_BUFFER
- || out_obj->buffer.length != context->cap.length) {
- acpi_print_osc_error(handle, context,
- "_OSC evaluation returned wrong type");
- status = AE_TYPE;
- goto out_kfree;
- }
- /* Need to ignore the bit0 in result code */
- errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
- if (errors) {
- if (errors & OSC_REQUEST_ERROR)
- acpi_print_osc_error(handle, context,
- "_OSC request failed");
- if (errors & OSC_INVALID_UUID_ERROR)
- acpi_print_osc_error(handle, context,
- "_OSC invalid UUID");
- if (errors & OSC_INVALID_REVISION_ERROR)
- acpi_print_osc_error(handle, context,
- "_OSC invalid revision");
- if (errors & OSC_CAPABILITIES_MASK_ERROR) {
- if (((u32 *)context->cap.pointer)[OSC_QUERY_TYPE]
- & OSC_QUERY_ENABLE)
- goto out_success;
- status = AE_SUPPORT;
- goto out_kfree;
- }
- status = AE_ERROR;
- goto out_kfree;
- }
-out_success:
- context->ret.length = out_obj->buffer.length;
- context->ret.pointer = kmalloc(context->ret.length, GFP_KERNEL);
- if (!context->ret.pointer) {
- status = AE_NO_MEMORY;
- goto out_kfree;
- }
- memcpy(context->ret.pointer, out_obj->buffer.pointer,
- context->ret.length);
- status = AE_OK;
-
-out_kfree:
- kfree(output.pointer);
- if (status != AE_OK)
- context->ret.pointer = NULL;
- return status;
-}
-EXPORT_SYMBOL(acpi_run_osc);
-
-static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48";
-static void acpi_bus_osc_support(void)
-{
- u32 capbuf[2];
- struct acpi_osc_context context = {
- .uuid_str = sb_uuid_str,
- .rev = 1,
- .cap.length = 8,
- .cap.pointer = capbuf,
- };
- acpi_handle handle;
-
- capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
- capbuf[OSC_SUPPORT_TYPE] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */
-#if defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) ||\
- defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE)
- capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PAD_SUPPORT;
-#endif
-
-#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
- capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT;
-#endif
- if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
- return;
- if (ACPI_SUCCESS(acpi_run_osc(handle, &context)))
- kfree(context.ret.pointer);
- /* do we need to check the returned cap? Sounds no */
-}
-
/* --------------------------------------------------------------------------
Event Management
-------------------------------------------------------------------------- */
status = acpi_ec_ecdt_probe();
/* Ignore result. Not having an ECDT is not fatal. */
- acpi_bus_osc_support();
-
status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n");
if (ret == NOTIFY_DONE)
ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
device);
- if (ret == NOTIFY_DONE || ret == NOTIFY_OK) {
- /*
- * It is also regarded as success if the notifier_chain
- * returns NOTIFY_OK or NOTIFY_DONE.
- */
- ret = 0;
- }
return ret;
}
spin_unlock_irqrestore(&ec->curr_lock, flags);
}
-static int acpi_ec_sync_query(struct acpi_ec *ec);
+static void acpi_ec_gpe_query(void *ec_cxt);
-static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
+static int ec_check_sci(struct acpi_ec *ec, u8 state)
{
if (state & ACPI_EC_FLAG_SCI) {
if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
- return acpi_ec_sync_query(ec);
+ return acpi_os_execute(OSL_EC_BURST_HANDLER,
+ acpi_ec_gpe_query, ec);
}
return 0;
}
{
unsigned long tmp;
int ret = 0;
+ pr_debug(PREFIX "transaction start\n");
+ /* disable GPE during transaction if storm is detected */
+ if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
+ acpi_disable_gpe(NULL, ec->gpe);
+ }
if (EC_FLAGS_MSI)
udelay(ACPI_EC_MSI_UDELAY);
/* start transaction */
clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
spin_unlock_irqrestore(&ec->curr_lock, tmp);
ret = ec_poll(ec);
+ pr_debug(PREFIX "transaction end\n");
spin_lock_irqsave(&ec->curr_lock, tmp);
ec->curr = NULL;
spin_unlock_irqrestore(&ec->curr_lock, tmp);
+ if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
+ /* check if we received SCI during transaction */
+ ec_check_sci(ec, acpi_ec_read_status(ec));
+ /* it is safe to enable GPE outside of transaction */
+ acpi_enable_gpe(NULL, ec->gpe);
+ } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
+ pr_info(PREFIX "GPE storm detected, "
+ "transactions will use polling mode\n");
+ set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
+ }
return ret;
}
status = -ETIME;
goto end;
}
- pr_debug(PREFIX "transaction start\n");
- /* disable GPE during transaction if storm is detected */
- if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
- acpi_disable_gpe(NULL, ec->gpe);
- }
-
status = acpi_ec_transaction_unlocked(ec, t);
-
- /* check if we received SCI during transaction */
- ec_check_sci_sync(ec, acpi_ec_read_status(ec));
- if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
- msleep(1);
- /* it is safe to enable GPE outside of transaction */
- acpi_enable_gpe(NULL, ec->gpe);
- } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
- pr_info(PREFIX "GPE storm detected, "
- "transactions will use polling mode\n");
- set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
- }
- pr_debug(PREFIX "transaction end\n");
end:
if (ec->global_lock)
acpi_release_global_lock(glk);
EXPORT_SYMBOL(ec_transaction);
-static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 * data)
+static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
{
int result;
u8 d;
.wlen = 0, .rlen = 1};
if (!ec || !data)
return -EINVAL;
+
/*
* Query the EC to find out which _Qxx method we need to evaluate.
* Note that successful completion of the query causes the ACPI_EC_SCI
* bit to be cleared (and thus clearing the interrupt source).
*/
- result = acpi_ec_transaction_unlocked(ec, &t);
+
+ result = acpi_ec_transaction(ec, &t);
if (result)
return result;
+
if (!d)
return -ENODATA;
+
*data = d;
return 0;
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
-static void acpi_ec_run(void *cxt)
-{
- struct acpi_ec_query_handler *handler = cxt;
- if (!handler)
- return;
- pr_debug(PREFIX "start query execution\n");
- if (handler->func)
- handler->func(handler->data);
- else if (handler->handle)
- acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
- pr_debug(PREFIX "stop query execution\n");
- kfree(handler);
-}
-
-static int acpi_ec_sync_query(struct acpi_ec *ec)
+static void acpi_ec_gpe_query(void *ec_cxt)
{
+ struct acpi_ec *ec = ec_cxt;
u8 value = 0;
- int status;
- struct acpi_ec_query_handler *handler, *copy;
- if ((status = acpi_ec_query_unlocked(ec, &value)))
- return status;
+ struct acpi_ec_query_handler *handler, copy;
+
+ if (!ec || acpi_ec_query(ec, &value))
+ return;
+ mutex_lock(&ec->lock);
list_for_each_entry(handler, &ec->list, node) {
if (value == handler->query_bit) {
/* have custom handler for this bit */
- copy = kmalloc(sizeof(*handler), GFP_KERNEL);
- if (!copy)
- return -ENOMEM;
- memcpy(copy, handler, sizeof(*copy));
- pr_debug(PREFIX "push query execution (0x%2x) on queue\n", value);
- return acpi_os_execute(OSL_GPE_HANDLER,
- acpi_ec_run, copy);
+ memcpy(&copy, handler, sizeof(copy));
+ mutex_unlock(&ec->lock);
+ if (copy.func) {
+ copy.func(copy.data);
+ } else if (copy.handle) {
+ acpi_evaluate_object(copy.handle, NULL, NULL, NULL);
+ }
+ return;
}
}
- return 0;
-}
-
-static void acpi_ec_gpe_query(void *ec_cxt)
-{
- struct acpi_ec *ec = ec_cxt;
- if (!ec)
- return;
- mutex_lock(&ec->lock);
- acpi_ec_sync_query(ec);
mutex_unlock(&ec->lock);
}
-static void acpi_ec_gpe_query(void *ec_cxt);
-
-static int ec_check_sci(struct acpi_ec *ec, u8 state)
-{
- if (state & ACPI_EC_FLAG_SCI) {
- if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
- pr_debug(PREFIX "push gpe query to the queue\n");
- return acpi_os_execute(OSL_NOTIFY_HANDLER,
- acpi_ec_gpe_query, ec);
- }
- }
- return 0;
-}
-
static u32 acpi_ec_gpe_handler(void *data)
{
struct acpi_ec *ec = data;
+ u8 status;
pr_debug(PREFIX "~~~> interrupt\n");
+ status = acpi_ec_read_status(ec);
- advance_transaction(ec, acpi_ec_read_status(ec));
- if (ec_transaction_done(ec) &&
- (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
+ advance_transaction(ec, status);
+ if (ec_transaction_done(ec) && (status & ACPI_EC_FLAG_IBF) == 0)
wake_up(&ec->wait);
- ec_check_sci(ec, acpi_ec_read_status(ec));
- }
+ ec_check_sci(ec, status);
return ACPI_INTERRUPT_HANDLED;
}
/* MSI EC needs special treatment, enable it */
static int ec_flag_msi(const struct dmi_system_id *id)
{
- printk(KERN_DEBUG PREFIX "Detected MSI hardware, enabling workarounds.\n");
EC_FLAGS_MSI = 1;
EC_FLAGS_VALIDATE_ECDT = 1;
return 0;
DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL},
{
ec_flag_msi, "MSI hardware", {
- DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star")}, NULL},
- {
- ec_flag_msi, "MSI hardware", {
- DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star")}, NULL},
- {
- ec_flag_msi, "MSI hardware", {
- DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star")}, NULL},
+ DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star"),
+ DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star") }, NULL},
{
ec_validate_ecdt, "ASUS hardware", {
DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
(void *)2},
- { set_max_cstate, "Pavilion zv5000", {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
- (void *)1},
- { set_max_cstate, "Asus L8400B", {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
- (void *)1},
{},
};
pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;
- /*
- * FADT specified C2 latency must be less than or equal to
- * 100 microseconds.
- */
- if (acpi_gbl_FADT.C2latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "C2 latency too large [%d]\n", acpi_gbl_FADT.C2latency));
- /* invalidate C2 */
- pr->power.states[ACPI_STATE_C2].address = 0;
- }
-
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"lvl2[0x%08x] lvl3[0x%08x]\n",
pr->power.states[ACPI_STATE_C2].address,
if (!cx->address)
return;
+ /*
+ * C2 latency must be less than or equal to 100
+ * microseconds.
+ */
+ else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "latency too large [%d]\n", cx->latency));
+ return;
+ }
+
/*
* Otherwise we've met all of our C2 requirements.
 * Normalize the C2 latency to expedite policy
{
struct acpi_bus_ops ops;
- if (!device)
- return -EINVAL;
-
memset(&ops, 0, sizeof(ops));
ops.acpi_op_start = 1;
board_ahci_mcp65 = 6,
board_ahci_nopmp = 7,
board_ahci_yesncq = 8,
- board_ahci_nosntf = 9,
/* global controller registers */
HOST_CAP = 0x00, /* host capabilities */
AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
link offline */
- AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
/* ap->flags bits */
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_yesncq] =
+ /* board_ahci_yesncq */
{
AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
.flags = AHCI_FLAG_COMMON,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_nosntf] =
- {
- AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
- .flags = AHCI_FLAG_COMMON,
- .pio_mask = ATA_PIO4,
- .udma_mask = ATA_UDMA6,
- .port_ops = &ahci_ops,
- },
};
static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
{ PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
{ PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
- { PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8 */
+ { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
{ PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
{ PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
{ PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
cap &= ~HOST_CAP_PMP;
}
- if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
- dev_printk(KERN_INFO, &pdev->dev,
- "controller can't do SNTF, turning off CAP_SNTF\n");
- cap &= ~HOST_CAP_SNTF;
- }
-
if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
port_map != 1) {
dev_printk(KERN_INFO, &pdev->dev,
},
.driver_data = "F.23", /* cutoff BIOS version */
},
- /*
- * Acer eMachines G725 has the same problem. BIOS
- * V1.03 is known to be broken. V3.04 is known to
- * work. In between, there are V1.06, V2.06 and V3.03
- * that we don't have much idea about. For now,
- * blacklist anything older than V3.04.
- */
- {
- .ident = "G725",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
- DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
- },
- .driver_data = "V3.04", /* cutoff BIOS version */
- },
{ } /* terminate list */
};
const struct dmi_system_id *dmi = dmi_first_match(sysids);
(timings[pio][1] << 8);
}
- if (ap->udma_mask)
+ if (ap->udma_mask) {
udma_enable &= ~(1 << devid);
-
- pci_write_config_word(dev, master_port, master_data);
+ pci_write_config_word(dev, master_port, master_data);
+ }
}
/* Don't scribble on 0x48 if the controller does not support UDMA */
if (ap->udma_mask)
int sata_link_resume(struct ata_link *link, const unsigned long *params,
unsigned long deadline)
{
- int tries = ATA_LINK_RESUME_TRIES;
u32 scontrol, serror;
int rc;
if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
return rc;
- /*
- * Writes to SControl sometimes get ignored under certain
- * controllers (ata_piix SIDPR). Make sure DET actually is
- * cleared.
- */
- do {
- scontrol = (scontrol & 0x0f0) | 0x300;
- if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
- return rc;
- /*
- * Some PHYs react badly if SStatus is pounded
- * immediately after resuming. Delay 200ms before
- * debouncing.
- */
- msleep(200);
+ scontrol = (scontrol & 0x0f0) | 0x300;
- /* is SControl restored correctly? */
- if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
- return rc;
- } while ((scontrol & 0xf0f) != 0x300 && --tries);
-
- if ((scontrol & 0xf0f) != 0x300) {
- ata_link_printk(link, KERN_ERR,
- "failed to resume link (SControl %X)\n",
- scontrol);
- return 0;
- }
+ if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
+ return rc;
- if (tries < ATA_LINK_RESUME_TRIES)
- ata_link_printk(link, KERN_WARNING,
- "link resume succeeded after %d retries\n",
- ATA_LINK_RESUME_TRIES - tries);
+ /* Some PHYs react badly if SStatus is pounded immediately
+ * after resuming. Delay 200ms before debouncing.
+ */
+ msleep(200);
if ((rc = sata_link_debounce(link, params, deadline)))
return rc;
qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
/* determine whether the command is worth retrying */
- if (qc->flags & ATA_QCFLAG_IO ||
- (!(qc->err_mask & AC_ERR_INVALID) &&
- qc->err_mask != AC_ERR_DEV))
+ if (!(qc->err_mask & AC_ERR_INVALID) &&
+ ((qc->flags & ATA_QCFLAG_IO) || qc->err_mask != AC_ERR_DEV))
qc->flags |= ATA_QCFLAG_RETRY;
/* accumulate error info */
do_write);
}
- if (!do_write)
- flush_dcache_page(page);
-
qc->curbytes += qc->sect_size;
qc->cursg_ofs += qc->sect_size;
regU |= udma_data[adev->dma_mode - XFER_UDMA_0] << shift;
/* Merge the control bits */
regU |= 1 << adev->devno; /* UDMA on */
- if (adev->dma_mode > XFER_UDMA_2) /* 15nS timing */
+ if (adev->dma_mode > 2) /* 15nS timing */
regU |= 4 << adev->devno;
} else {
regU &= ~ (1 << adev->devno); /* UDMA off */
#include <linux/libata.h>
#define DRV_NAME "pata_hpt37x"
-#define DRV_VERSION "0.6.14"
+#define DRV_VERSION "0.6.12"
struct hpt_clock {
u8 xfer_speed;
pci_read_config_dword(pdev, addr1, &reg);
mode = hpt37x_find_mode(ap, adev->pio_mode);
- mode &= 0xCFC3FFFF; /* Leave DMA bits alone */
- reg &= ~0xCFC3FFFF; /* Strip timing bits */
+ mode &= ~0x8000000; /* No FIFO in PIO */
+ mode &= ~0x30070000; /* Leave config bits alone */
+ reg &= 0x30070000; /* Strip timing bits */
pci_write_config_dword(pdev, addr1, reg | mode);
}
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 addr1, addr2;
- u32 reg, mode, mask;
+ u32 reg;
+ u32 mode;
u8 fast;
addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
fast |= 0x01;
pci_write_config_byte(pdev, addr2, fast);
- mask = adev->dma_mode < XFER_UDMA_0 ? 0x31C001FF : 0x303C0000;
-
pci_read_config_dword(pdev, addr1, &reg);
mode = hpt37x_find_mode(ap, adev->dma_mode);
- mode &= mask;
- reg &= ~mask;
+ mode |= 0x8000000; /* FIFO in MWDMA or UDMA */
+ mode &= ~0xC0000000; /* Leave config bits alone */
+ reg &= 0xC0000000; /* Strip timing bits */
pci_write_config_dword(pdev, addr1, reg | mode);
}
mode = hpt37x_find_mode(ap, adev->pio_mode);
printk("Find mode for %d reports %X\n", adev->pio_mode, mode);
- mode &= 0xCFC3FFFF; /* Leave DMA bits alone */
- reg &= ~0xCFC3FFFF; /* Strip timing bits */
+ mode &= ~0x80000000; /* No FIFO in PIO */
+ mode &= ~0x30070000; /* Leave config bits alone */
+ reg &= 0x30070000; /* Strip timing bits */
pci_write_config_dword(pdev, addr1, reg | mode);
}
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 addr1, addr2;
- u32 reg, mode, mask;
+ u32 reg;
+ u32 mode;
u8 fast;
addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
fast &= ~0x07;
pci_write_config_byte(pdev, addr2, fast);
- mask = adev->dma_mode < XFER_UDMA_0 ? 0x31C001FF : 0x303C0000;
-
pci_read_config_dword(pdev, addr1, &reg);
mode = hpt37x_find_mode(ap, adev->dma_mode);
printk("Find mode for DMA %d reports %X\n", adev->dma_mode, mode);
- mode &= mask;
- reg &= ~mask;
+ mode &= ~0xC0000000; /* Leave config bits alone */
+ mode |= 0x80000000; /* FIFO in MWDMA or UDMA */
+ reg &= 0xC0000000; /* Strip timing bits */
pci_write_config_dword(pdev, addr1, reg | mode);
}
* Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
* Portions Copyright (C) 2001 Sun Microsystems, Inc.
* Portions Copyright (C) 2003 Red Hat Inc
- * Portions Copyright (C) 2005-2009 MontaVista Software, Inc.
+ * Portions Copyright (C) 2005-2007 MontaVista Software, Inc.
*
*
* TODO
#include <linux/libata.h>
#define DRV_NAME "pata_hpt3x2n"
-#define DRV_VERSION "0.3.8"
+#define DRV_VERSION "0.3.4"
enum {
HPT_PCI_FAST = (1 << 31),
pci_read_config_dword(pdev, addr1, &reg);
mode = hpt3x2n_find_mode(ap, adev->pio_mode);
- mode &= 0xCFC3FFFF; /* Leave DMA bits alone */
- reg &= ~0xCFC3FFFF; /* Strip timing bits */
+ mode &= ~0x8000000; /* No FIFO in PIO */
+ mode &= ~0x30070000; /* Leave config bits alone */
+ reg &= 0x30070000; /* Strip timing bits */
pci_write_config_dword(pdev, addr1, reg | mode);
}
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 addr1, addr2;
- u32 reg, mode, mask;
+ u32 reg;
+ u32 mode;
u8 fast;
addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
fast &= ~0x07;
pci_write_config_byte(pdev, addr2, fast);
- mask = adev->dma_mode < XFER_UDMA_0 ? 0x31C001FF : 0x303C0000;
-
pci_read_config_dword(pdev, addr1, &reg);
mode = hpt3x2n_find_mode(ap, adev->dma_mode);
- mode &= mask;
- reg &= ~mask;
+ mode |= 0x8000000; /* FIFO in MWDMA or UDMA */
+ mode &= ~0xC0000000; /* Leave config bits alone */
+ reg &= 0xC0000000; /* Strip timing bits */
pci_write_config_dword(pdev, addr1, reg | mode);
}
static void hpt3x2n_set_clock(struct ata_port *ap, int source)
{
- void __iomem *bmdma = ap->ioaddr.bmdma_addr - ap->port_no * 8;
+ void __iomem *bmdma = ap->ioaddr.bmdma_addr;
/* Tristate the bus */
iowrite8(0x80, bmdma+0x73);
iowrite8(source, bmdma+0x7B);
iowrite8(0xC0, bmdma+0x79);
- /* Reset state machines, avoid enabling the disabled channels */
- iowrite8(ioread8(bmdma+0x70) | 0x32, bmdma+0x70);
- iowrite8(ioread8(bmdma+0x74) | 0x32, bmdma+0x74);
+ /* Reset state machines */
+ iowrite8(0x37, bmdma+0x70);
+ iowrite8(0x37, bmdma+0x74);
/* Complete reset */
iowrite8(0x00, bmdma+0x79);
iowrite8(0x00, bmdma+0x77);
}
+/* Check if our partner interface is idle */
+
+static int hpt3x2n_pair_idle(struct ata_port *ap)
+{
+ struct ata_host *host = ap->host;
+ struct ata_port *pair = host->ports[ap->port_no ^ 1];
+
+ if (pair->hsm_task_state == HSM_ST_IDLE)
+ return 1;
+ return 0;
+}
+
static int hpt3x2n_use_dpll(struct ata_port *ap, int writing)
{
long flags = (long)ap->host->private_data;
-
/* See if we should use the DPLL */
if (writing)
return USE_DPLL; /* Needed for write */
return 0;
}
-static int hpt3x2n_qc_defer(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct ata_port *alt = ap->host->ports[ap->port_no ^ 1];
- int rc, flags = (long)ap->host->private_data;
- int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE);
-
- /* First apply the usual rules */
- rc = ata_std_qc_defer(qc);
- if (rc != 0)
- return rc;
-
- if ((flags & USE_DPLL) != dpll && alt->qc_active)
- return ATA_DEFER_PORT;
- return 0;
-}
-
static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc)
{
+ struct ata_taskfile *tf = &qc->tf;
struct ata_port *ap = qc->ap;
int flags = (long)ap->host->private_data;
- int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE);
-
- if ((flags & USE_DPLL) != dpll) {
- flags &= ~USE_DPLL;
- flags |= dpll;
- ap->host->private_data = (void *)(long)flags;
- hpt3x2n_set_clock(ap, dpll ? 0x21 : 0x23);
+ if (hpt3x2n_pair_idle(ap)) {
+ int dpll = hpt3x2n_use_dpll(ap, (tf->flags & ATA_TFLAG_WRITE));
+ if ((flags & USE_DPLL) != dpll) {
+ if (dpll == 1)
+ hpt3x2n_set_clock(ap, 0x21);
+ else
+ hpt3x2n_set_clock(ap, 0x23);
+ }
}
return ata_sff_qc_issue(qc);
}
.inherits = &ata_bmdma_port_ops,
.bmdma_stop = hpt3x2n_bmdma_stop,
-
- .qc_defer = hpt3x2n_qc_defer,
.qc_issue = hpt3x2n_qc_issue,
.cable_detect = hpt3x2n_cable_detect,
unsigned int f_low, f_high;
int adjust;
unsigned long iobase = pci_resource_start(dev, 4);
- void *hpriv = (void *)USE_DPLL;
+ void *hpriv = NULL;
int rc;
rc = pcim_enable_device(dev);
/* Set our private data up. We only need a few flags so we use
it directly */
if (pci_mhz > 60) {
- hpriv = (void *)(PCI66 | USE_DPLL);
+ hpriv = (void *)PCI66;
/*
* On HPT371N, if ATA clock is 66 MHz we must set bit 2 in
* the MISC. register to stretch the UltraDMA Tss timing.
else
pr_debug("class '%s' does not have a release() function, "
"be careful\n", class->name);
-
- kfree(cp);
}
static struct sysfs_ops class_sysfs_ops = {
*/
const char *dev_driver_string(const struct device *dev)
{
- struct device_driver *drv;
-
- /* dev->driver can change to NULL underneath us because of unbinding,
- * so be careful about accessing it. dev->bus and dev->class should
- * never change once they are set, so they don't need special care.
- */
- drv = ACCESS_ONCE(dev->driver);
- return drv ? drv->name :
+ return dev->driver ? dev->driver->name :
(dev->bus ? dev->bus->name :
(dev->class ? dev->class->name : ""));
}
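
The ACCESS_ONCE() in the removed version matters because a concurrent
unbind can set dev->driver to NULL between the test and the
dereference; forcing a single load guarantees both see the same value.
The idiom is nothing more than a volatile cast. A self-contained
sketch:

#include <stdio.h>

#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct driver { const char *name; };
struct device { struct driver *driver; };

static const char *driver_string(struct device *dev)
{
        /* one load: drv cannot change between the test and the use */
        struct driver *drv = ACCESS_ONCE(dev->driver);

        return drv ? drv->name : "";
}

int main(void)
{
        struct driver drv = { "example" };
        struct device dev = { &drv };

        printf("%s\n", driver_string(&dev));
        return 0;
}
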
{
int err;
struct vfsmount *mnt;
- char options[] = "mode=0755";
err = register_filesystem(&dev_fs_type);
if (err) {
return err;
}
- mnt = kern_mount_data(&dev_fs_type, options);
+ mnt = kern_mount(&dev_fs_type);
if (IS_ERR(mnt)) {
err = PTR_ERR(mnt);
printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err);
}
if (parent) {
- spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
+ spin_lock(&parent->power.lock);
/*
* It is invalid to put an active child under a parent that is
if (*pos > h->highest_lun)
return 0;
- if (drv == NULL) /* it's possible for h->drv[] to have holes. */
- return 0;
-
if (drv->heads == 0)
return 0;
pkt_kobj_remove(pd->kobj_stat);
pkt_kobj_remove(pd->kobj_wqueue);
if (class_pktcdvd)
- device_unregister(pd->dev);
+ device_destroy(class_pktcdvd, pd->pkt_dev);
}
return;
usb_anchor_urb(urb, &data->bulk_anchor);
- usb_mark_last_busy(data->udev);
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
* popup and for the GTT.
*/
int gtt_entries; /* i830+ */
- int gtt_total_size;
union {
void __iomem *i9xx_flush_page;
void *i8xx_flush_page;
readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
if (agp_bridge->driver->needs_scratch_page) {
- for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
+ for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
writel(agp_bridge->scratch_page, intel_private.gtt+i);
}
readl(intel_private.gtt+i-1); /* PCI Posting. */
if (!intel_private.gtt)
return -ENOMEM;
- intel_private.gtt_total_size = gtt_map_size / 4;
-
temp &= 0xfff80000;
intel_private.registers = ioremap(temp, 128 * 4096);
if (!intel_private.gtt)
return -ENOMEM;
- intel_private.gtt_total_size = gtt_size / 4;
-
intel_private.registers = ioremap(temp, 128 * 4096);
if (!intel_private.registers) {
iounmap(intel_private.gtt);
# include <linux/efi.h>
#endif
-static inline unsigned long size_inside_page(unsigned long start,
- unsigned long size)
-{
- unsigned long sz;
-
- if (-start & (PAGE_SIZE - 1))
- sz = -start & (PAGE_SIZE - 1);
- else
- sz = PAGE_SIZE;
-
- return min_t(unsigned long, sz, size);
-}
-
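
Both the helper above and the open-coded copies that replace it rest on
the same idiom: "-p & (PAGE_SIZE - 1)" is the distance from p to the
next page boundary, and 0 when p is already page aligned (hence the
else branch choosing a full page). A standalone sketch:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
        unsigned long p;

        for (p = 4094; p <= 4097; p++)  /* prints 2, 1, 0, 4095 */
                printf("p=%lu: %lu bytes to the next boundary\n",
                       p, -p & (PAGE_SIZE - 1));
        return 0;
}
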
/*
* Architectures vary in how they handle caching for addresses
* outside of main memory.
unsigned long p = *ppos;
ssize_t low_count, read, sz;
char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
- int err = 0;
read = 0;
if (p < (unsigned long) high_memory) {
}
#endif
while (low_count > 0) {
- sz = size_inside_page(p, low_count);
+ /*
+ * Handle first page in case it's not aligned
+ */
+ if (-p & (PAGE_SIZE - 1))
+ sz = -p & (PAGE_SIZE - 1);
+ else
+ sz = PAGE_SIZE;
+
+ sz = min_t(unsigned long, sz, low_count);
/*
* On ia64 if a page has been mapped somewhere as
if (!kbuf)
return -ENOMEM;
while (count > 0) {
- int len = size_inside_page(p, count);
+ int len = count;
- if (!is_vmalloc_or_module_addr((void *)p)) {
- err = -ENXIO;
- break;
- }
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
len = vread(kbuf, (char *)p, len);
if (!len)
break;
if (copy_to_user(buf, kbuf, len)) {
- err = -EFAULT;
- break;
+ free_page((unsigned long)kbuf);
+ return -EFAULT;
}
count -= len;
buf += len;
}
free_page((unsigned long)kbuf);
}
- *ppos = p;
- return read ? read : err;
+ *ppos = p;
+ return read;
}
while (count > 0) {
char *ptr;
+ /*
+ * Handle first page in case it's not aligned
+ */
+ if (-realp & (PAGE_SIZE - 1))
+ sz = -realp & (PAGE_SIZE - 1);
+ else
+ sz = PAGE_SIZE;
- sz = size_inside_page(realp, count);
+ sz = min_t(unsigned long, sz, count);
/*
* On ia64 if a page has been mapped somewhere as
ssize_t virtr = 0;
ssize_t written;
char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
- int err = 0;
if (p < (unsigned long) high_memory) {
if (!kbuf)
return wrote ? wrote : -ENOMEM;
while (count > 0) {
- int len = size_inside_page(p, count);
+ int len = count;
- if (!is_vmalloc_or_module_addr((void *)p)) {
- err = -ENXIO;
- break;
- }
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
if (len) {
written = copy_from_user(kbuf, buf, len);
if (written) {
- err = -EFAULT;
- break;
+ if (wrote + virtr)
+ break;
+ free_page((unsigned long)kbuf);
+ return -EFAULT;
}
}
- vwrite(kbuf, (char *)p, len);
+ len = vwrite(kbuf, (char *)p, len);
count -= len;
buf += len;
virtr += len;
free_page((unsigned long)kbuf);
}
- *ppos = p;
- return virtr + wrote ? : err;
+ *ppos = p;
+ return virtr + wrote;
}
#endif
dc->open_ttys--;
port->count--;
+ tty_port_tty_set(port, NULL);
if (port->count == 0) {
DBG1("close: %d", nport->token_dl);
- tty_port_tty_set(port, NULL);
spin_lock_irqsave(&dc->spin_mutex, flags);
dc->last_ier &= ~(nport->token_dl);
writew(dc->last_ier, dc->reg_ier);
/* like a named pipe */
}
+ /*
+ * If we gave the user some bytes, update the access time.
+ */
+ if (count)
+ file_accessed(file);
+
return (count ? count : retval);
}
size_t count, loff_t *ppos)
{
size_t ret;
+ struct inode *inode = file->f_path.dentry->d_inode;
ret = write_pool(&blocking_pool, buffer, count);
if (ret)
if (ret)
return ret;
+ inode->i_mtime = current_fs_time(inode->i_sb);
+ mark_inode_dirty(inode);
return (ssize_t)count;
}
struct tpm_inf_dev {
int iotype;
- void __iomem *mem_base; /* MMIO ioremap'd addr */
- unsigned long map_base; /* phys MMIO base */
- unsigned long map_size; /* MMIO region size */
- unsigned int index_off; /* index register offset */
+ void __iomem *mem_base; /* MMIO ioremap'd addr */
+ unsigned long map_base; /* phys MMIO base */
+ unsigned long map_size; /* MMIO region size */
+ unsigned int index_off; /* index register offset */
- unsigned int data_regs; /* Data registers */
+ unsigned int data_regs; /* Data registers */
unsigned int data_size;
unsigned int config_port; /* IO Port config index reg */
.miscdev = {.fops = &inf_ops,},
};
-static const struct pnp_device_id tpm_inf_pnp_tbl[] = {
+static const struct pnp_device_id tpm_pnp_tbl[] = {
/* Infineon TPMs */
{"IFX0101", 0},
{"IFX0102", 0},
{"", 0}
};
-MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl);
+MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
const struct pnp_device_id *dev_id)
if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
!(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {
- tpm_dev.iotype = TPM_INF_IO_PORT;
+ tpm_dev.iotype = TPM_INF_IO_PORT;
tpm_dev.config_port = pnp_port_start(dev, 0);
tpm_dev.config_size = pnp_port_len(dev, 0);
goto err_last;
}
} else if (pnp_mem_valid(dev, 0) &&
- !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {
+ !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {
- tpm_dev.iotype = TPM_INF_IO_MEM;
+ tpm_dev.iotype = TPM_INF_IO_MEM;
tpm_dev.map_base = pnp_mem_start(dev, 0);
tpm_dev.map_size = pnp_mem_len(dev, 0);
"product id 0x%02x%02x"
"%s\n",
tpm_dev.iotype == TPM_INF_IO_PORT ?
- tpm_dev.config_port :
- tpm_dev.map_base + tpm_dev.index_off,
+ tpm_dev.config_port :
+ tpm_dev.map_base + tpm_dev.index_off,
tpm_dev.iotype == TPM_INF_IO_PORT ?
- tpm_dev.data_regs :
- tpm_dev.map_base + tpm_dev.data_regs,
+ tpm_dev.data_regs :
+ tpm_dev.map_base + tpm_dev.data_regs,
version[0], version[1],
vendorid[0], vendorid[1],
productid[0], productid[1], chipname);
iounmap(tpm_dev.mem_base);
release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
}
- tpm_dev_vendor_release(chip);
tpm_remove_hardware(chip->dev);
}
}
-static int tpm_inf_pnp_suspend(struct pnp_dev *dev, pm_message_t pm_state)
-{
- struct tpm_chip *chip = pnp_get_drvdata(dev);
- int rc;
- if (chip) {
- u8 savestate[] = {
- 0, 193, /* TPM_TAG_RQU_COMMAND */
- 0, 0, 0, 10, /* blob length (in bytes) */
- 0, 0, 0, 152 /* TPM_ORD_SaveState */
- };
- dev_info(&dev->dev, "saving TPM state\n");
- rc = tpm_inf_send(chip, savestate, sizeof(savestate));
- if (rc < 0) {
- dev_err(&dev->dev, "error while saving TPM state\n");
- return rc;
- }
- }
- return 0;
-}
-
-static int tpm_inf_pnp_resume(struct pnp_dev *dev)
-{
- /* Re-configure TPM after suspending */
- tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
- tpm_config_out(IOLIMH, TPM_INF_ADDR);
- tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA);
- tpm_config_out(IOLIML, TPM_INF_ADDR);
- tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA);
- /* activate register */
- tpm_config_out(TPM_DAR, TPM_INF_ADDR);
- tpm_config_out(0x01, TPM_INF_DATA);
- tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
- /* disable RESET, LP and IRQC */
- tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
- return tpm_pm_resume(&dev->dev);
-}
-
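
The savestate[] blob in the removed suspend handler is a raw TPM 1.1 command with big-endian fields; decoded field by field:

	/* 0x00C1       tag:       TPM_TAG_RQU_COMMAND        */
	/* 0x0000000A   paramSize: 10 bytes (the whole blob)  */
	/* 0x00000098   ordinal:   TPM_ORD_SaveState (152)    */

TPM_ORD_SaveState asks the chip to preserve its volatile state so it survives the suspend.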
static struct pnp_driver tpm_inf_pnp_driver = {
.name = "tpm_inf_pnp",
- .id_table = tpm_inf_pnp_tbl,
+ .driver = {
+ .owner = THIS_MODULE,
+ .suspend = tpm_pm_suspend,
+ .resume = tpm_pm_resume,
+ },
+ .id_table = tpm_pnp_tbl,
.probe = tpm_inf_pnp_probe,
- .suspend = tpm_inf_pnp_suspend,
- .resume = tpm_inf_pnp_resume,
- .remove = __devexit_p(tpm_inf_pnp_remove)
+ .remove = __devexit_p(tpm_inf_pnp_remove),
};
static int __init init_inf(void)
MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>");
MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
-MODULE_VERSION("1.9.2");
+MODULE_VERSION("1.9");
MODULE_LICENSE("GPL");
pid = task_pid(current);
type = PIDTYPE_PID;
}
- get_pid(pid);
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
retval = __f_setown(filp, pid, type, 0);
- put_pid(pid);
if (retval)
goto out;
} else {
MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");
+static u32 cn_idx = CN_IDX_CONNECTOR;
+static u32 cn_val = CN_VAL_CONNECTOR;
+
+module_param(cn_idx, uint, 0);
+module_param(cn_val, uint, 0);
+MODULE_PARM_DESC(cn_idx, "Connector's main device idx.");
+MODULE_PARM_DESC(cn_val, "Connector's main device val.");
+
+static DEFINE_MUTEX(notify_lock);
+static LIST_HEAD(notify_list);
+
static struct cn_dev cdev;
static int cn_already_initialized;
}
}
+/*
+ * Notification routing.
+ *
+ * Gets an id and checks whether there are notification requests for
+ * its idx and val. If there are such requests, the listeners are
+ * notified with the given notify event.
+ *
+ */
+static void cn_notify(struct cb_id *id, u32 notify_event)
+{
+ struct cn_ctl_entry *ent;
+
+ mutex_lock(&notify_lock);
+ list_for_each_entry(ent, &notify_list, notify_entry) {
+ int i;
+ struct cn_notify_req *req;
+ struct cn_ctl_msg *ctl = ent->msg;
+ int idx_found, val_found;
+
+ idx_found = val_found = 0;
+
+ req = (struct cn_notify_req *)ctl->data;
+ for (i = 0; i < ctl->idx_notify_num; ++i, ++req) {
+ if (id->idx >= req->first &&
+ id->idx < req->first + req->range) {
+ idx_found = 1;
+ break;
+ }
+ }
+
+ for (i = 0; i < ctl->val_notify_num; ++i, ++req) {
+ if (id->val >= req->first &&
+ id->val < req->first + req->range) {
+ val_found = 1;
+ break;
+ }
+ }
+
+ if (idx_found && val_found) {
+ struct cn_msg m = { .ack = notify_event, };
+
+ memcpy(&m.id, id, sizeof(m.id));
+ cn_netlink_send(&m, ctl->group, GFP_KERNEL);
+ }
+ }
+ mutex_unlock(&notify_lock);
+}
+
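
Each cn_notify_req describes the half-open range [first, first + range). For example (illustrative values only), a request matching idx 0x10 through 0x13 would be:

	struct cn_notify_req req = {
		.first = 0x10,
		.range = 4,	/* matches id->idx in [0x10, 0x14) */
	};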
/*
* Callback add routing - adds callback with given ID and name.
* If there is registered callback with the same ID it will not be added.
if (err)
return err;
+ cn_notify(id, 0);
+
return 0;
}
EXPORT_SYMBOL_GPL(cn_add_callback);
struct cn_dev *dev = &cdev;
cn_queue_del_callback(dev->cbdev, id);
+ cn_notify(id, 1);
}
EXPORT_SYMBOL_GPL(cn_del_callback);
+/*
+ * Checks whether two connector control messages are the same.
+ * Returns 1 if they are the same or if the first one is corrupted.
+ */
+static int cn_ctl_msg_equals(struct cn_ctl_msg *m1, struct cn_ctl_msg *m2)
+{
+ int i;
+ struct cn_notify_req *req1, *req2;
+
+ if (m1->idx_notify_num != m2->idx_notify_num)
+ return 0;
+
+ if (m1->val_notify_num != m2->val_notify_num)
+ return 0;
+
+ if (m1->len != m2->len)
+ return 0;
+
+ if ((m1->idx_notify_num + m1->val_notify_num) * sizeof(*req1) !=
+ m1->len)
+ return 1;
+
+ req1 = (struct cn_notify_req *)m1->data;
+ req2 = (struct cn_notify_req *)m2->data;
+
+ for (i = 0; i < m1->idx_notify_num; ++i) {
+ if (req1->first != req2->first || req1->range != req2->range)
+ return 0;
+ req1++;
+ req2++;
+ }
+
+ for (i = 0; i < m1->val_notify_num; ++i) {
+ if (req1->first != req2->first || req1->range != req2->range)
+ return 0;
+ req1++;
+ req2++;
+ }
+
+ return 1;
+}
+
+/*
+ * Main connector device's callback.
+ *
+ * Used for notification of a request's processing.
+ */
+static void cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
+{
+ struct cn_ctl_msg *ctl;
+ struct cn_ctl_entry *ent;
+ u32 size;
+
+ if (msg->len < sizeof(*ctl))
+ return;
+
+ ctl = (struct cn_ctl_msg *)msg->data;
+
+ size = (sizeof(*ctl) + ((ctl->idx_notify_num +
+ ctl->val_notify_num) *
+ sizeof(struct cn_notify_req)));
+
+ if (msg->len != size)
+ return;
+
+ if (ctl->len + sizeof(*ctl) != msg->len)
+ return;
+
+ /*
+ * Remove notification.
+ */
+ if (ctl->group == 0) {
+ struct cn_ctl_entry *n;
+
+ mutex_lock(&notify_lock);
+ list_for_each_entry_safe(ent, n, &notify_list, notify_entry) {
+ if (cn_ctl_msg_equals(ent->msg, ctl)) {
+ list_del(&ent->notify_entry);
+ kfree(ent);
+ }
+ }
+ mutex_unlock(&notify_lock);
+
+ return;
+ }
+
+ size += sizeof(*ent);
+
+ ent = kzalloc(size, GFP_KERNEL);
+ if (!ent)
+ return;
+
+ ent->msg = (struct cn_ctl_msg *)(ent + 1);
+
+ memcpy(ent->msg, ctl, size - sizeof(*ent));
+
+ mutex_lock(&notify_lock);
+ list_add(&ent->notify_entry, &notify_list);
+ mutex_unlock(&notify_lock);
+}
+
static int cn_proc_show(struct seq_file *m, void *v)
{
struct cn_queue_dev *dev = cdev.cbdev;
static int __devinit cn_init(void)
{
struct cn_dev *dev = &cdev;
+ int err;
dev->input = cn_rx_skb;
+ dev->id.idx = cn_idx;
+ dev->id.val = cn_val;
dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR,
CN_NETLINK_USERS + 0xf,
cn_already_initialized = 1;
+ err = cn_add_callback(&dev->id, "connector", &cn_callback);
+ if (err) {
+ cn_already_initialized = 0;
+ cn_queue_free_dev(dev->cbdev);
+ netlink_kernel_release(dev->nls);
+ return -EINVAL;
+ }
+
proc_net_fops_create(&init_net, "connector", S_IRUGO, &cn_file_ops);
return 0;
proc_net_remove(&init_net, "connector");
+ cn_del_callback(&dev->id);
cn_queue_free_dev(dev->cbdev);
netlink_kernel_release(dev->nls);
}
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
-#include <linux/math64.h>
#define BUCKETS 12
#define RESOLUTION 1024
static void menu_update(struct cpuidle_device *dev);
-/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
-static u64 div_round64(u64 dividend, u32 divisor)
-{
- return div_u64(dividend + (divisor / 2), divisor);
-}
-
/**
* menu_select - selects the next idle state to enter
* @dev: the CPU
data->correction_factor[data->bucket] = RESOLUTION * DECAY;
/* Make sure to round up for half microseconds */
- data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
- RESOLUTION * DECAY);
+ data->predicted_us = DIV_ROUND_CLOSEST(
+ data->expected_us * data->correction_factor[data->bucket],
+ RESOLUTION * DECAY);
/*
* We want to default to C1 (hlt), not to busy polling
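
DIV_ROUND_CLOSEST() rounds the quotient to the nearest integer by adding half the divisor before the truncating division; for non-negative operands it behaves like the sketch below (macro name hypothetical):

	#define DIV_ROUND_CLOSEST_POS(x, d)	(((x) + (d) / 2) / (d))

	DIV_ROUND_CLOSEST_POS(7, 2);	/* (7 + 1) / 2 == 4: 3.5 rounds up    */
	DIV_ROUND_CLOSEST_POS(9, 4);	/* (9 + 2) / 4 == 2: 2.25 rounds down */

The div_round64() wrapper removed above did the same through div_u64() so that 32-bit builds avoid a native 64-bit division.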
return crypto_shash_update(&dctx->fallback, data, length);
}
-static int padlock_sha_export(struct shash_desc *desc, void *out)
-{
- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
-
- return crypto_shash_export(&dctx->fallback, out);
-}
-
-static int padlock_sha_import(struct shash_desc *desc, const void *in)
-{
- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
- struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
-
- dctx->fallback.tfm = ctx->fallback;
- dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- return crypto_shash_import(&dctx->fallback, in);
-}
-
static inline void padlock_output_block(uint32_t *src,
uint32_t *dst, size_t count)
{
.update = padlock_sha_update,
.finup = padlock_sha1_finup,
.final = padlock_sha1_final,
- .export = padlock_sha_export,
- .import = padlock_sha_import,
.descsize = sizeof(struct padlock_sha_desc),
- .statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-padlock",
.update = padlock_sha_update,
.finup = padlock_sha256_finup,
.final = padlock_sha256_final,
- .export = padlock_sha_export,
- .import = padlock_sha_import,
.descsize = sizeof(struct padlock_sha_desc),
- .statesize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-padlock",
dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
cookie, done ? *done : 0, used ? *used : 0);
- spin_lock_bh(&atchan->lock);
+ spin_lock_bh(&atchan->lock);
last_complete = atchan->completed_cookie;
last_used = chan->cookie;
ret = dma_async_is_complete(cookie, last_complete, last_used);
}
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_bh(&atchan->lock);
if (done)
*done = last_complete;
dma->dev = &pdev->dev;
if (!dma->chancnt) {
- dev_err(dev, "channel enumeration error\n");
+ dev_err(dev, "zero channels detected\n");
goto err_setup_interrupts;
}
* @dca: direct cache access context
* @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
* @enumerate_channels: hw version specific channel enumeration
- * @reset_hw: hw version specific channel (re)initialization
* @cleanup_tasklet: select between the v2 and v3 cleanup routines
* @timer_fn: select between the v2 and v3 timer watchdog routines
* @self_test: hardware version specific self test for each supported op type
struct dca_provider *dca;
void (*intr_quirk)(struct ioatdma_device *device);
int (*enumerate_channels)(struct ioatdma_device *device);
- int (*reset_hw)(struct ioat_chan_common *chan);
void (*cleanup_tasklet)(unsigned long data);
void (*timer_fn)(unsigned long data);
int (*self_test)(struct ioatdma_device *device);
writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}
-static inline void ioat_reset(struct ioat_chan_common *chan)
-{
- u8 ver = chan->device->version;
-
- writeb(IOAT_CHANCMD_RESET, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
-}
-
-static inline bool ioat_reset_pending(struct ioat_chan_common *chan)
-{
- u8 ver = chan->device->version;
- u8 cmd;
-
- cmd = readb(chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
- return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
-}
-
static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr)
{
struct ioat_chan_common *chan = &ioat->base;
__ioat2_start_null_desc(ioat);
}
-int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
+static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
{
- unsigned long end = jiffies + tmo;
- int err = 0;
+ struct ioat_chan_common *chan = &ioat->base;
+ unsigned long phys_complete;
u32 status;
status = ioat_chansts(chan);
if (is_ioat_active(status) || is_ioat_idle(status))
ioat_suspend(chan);
while (is_ioat_active(status) || is_ioat_idle(status)) {
- if (tmo && time_after(jiffies, end)) {
- err = -ETIMEDOUT;
- break;
- }
status = ioat_chansts(chan);
cpu_relax();
}
- return err;
-}
-
-int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
-{
- unsigned long end = jiffies + tmo;
- int err = 0;
-
- ioat_reset(chan);
- while (ioat_reset_pending(chan)) {
- if (end && time_after(jiffies, end)) {
- err = -ETIMEDOUT;
- break;
- }
- cpu_relax();
- }
-
- return err;
-}
-
-static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
-{
- struct ioat_chan_common *chan = &ioat->base;
- unsigned long phys_complete;
-
- ioat2_quiesce(chan, 0);
if (ioat_cleanup_preamble(chan, &phys_complete))
__cleanup(ioat, phys_complete);
spin_unlock_bh(&chan->cleanup_lock);
}
-static int ioat2_reset_hw(struct ioat_chan_common *chan)
-{
- /* throw away whatever the channel was doing and get it initialized */
- u32 chanerr;
-
- ioat2_quiesce(chan, msecs_to_jiffies(100));
-
- chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
- writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
-
- return ioat2_reset_sync(chan, msecs_to_jiffies(200));
-}
-
/**
* ioat2_enumerate_channels - find and initialize the device's channels
* @device: the device to be enumerated
(unsigned long) ioat);
ioat->xfercap_log = xfercap_log;
spin_lock_init(&ioat->ring_lock);
- if (device->reset_hw(&ioat->base)) {
- i = 0;
- break;
- }
}
dma->chancnt = i;
return i;
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
struct ioat_chan_common *chan = &ioat->base;
struct ioat_ring_ent **ring;
+ u32 chanerr;
int order;
/* have we already been set up? */
/* Setup register to interrupt and write completion status on error */
writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
+ chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+ if (chanerr) {
+ dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
+ writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
+ }
+
/* allocate a completion writeback area */
/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
chan->completion = pci_pool_alloc(chan->device->completion_pool,
tasklet_disable(&chan->cleanup_task);
del_timer_sync(&chan->timer);
device->cleanup_tasklet((unsigned long) ioat);
- device->reset_hw(chan);
+
+ /* Delay 100ms after reset to allow internal DMA logic to quiesce
+ * before removing DMA descriptor resources.
+ */
+ writeb(IOAT_CHANCMD_RESET,
+ chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
+ mdelay(100);
spin_lock_bh(&ioat->ring_lock);
descs = ioat2_ring_space(ioat);
int err;
device->enumerate_channels = ioat2_enumerate_channels;
- device->reset_hw = ioat2_reset_hw;
device->cleanup_tasklet = ioat2_cleanup_tasklet;
device->timer_fn = ioat2_timer_event;
device->self_test = ioat_dma_self_test;
void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
void ioat2_cleanup_tasklet(unsigned long data);
void ioat2_timer_event(unsigned long data);
-int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
-int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
extern struct kobj_type ioat2_ktype;
extern struct kmem_cache *ioat2_cache;
#endif /* IOATDMA_V2_H */
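
The removed ioat2_quiesce()/ioat2_reset_sync() helpers are instances of the usual jiffies-based poll-with-timeout idiom, with tmo == 0 meaning "wait forever". A generic sketch (condition_met() is a hypothetical stand-in for the hardware status check):

	unsigned long end = jiffies + msecs_to_jiffies(100);
	int err = 0;

	while (!condition_met()) {
		if (time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		cpu_relax();	/* busy-wait politely */
	}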
num_descs = ioat2_xferlen_to_descs(ioat, len);
/* we need 2x the number of descriptors to cover greater than 3
- * sources (we need 1 extra source in the q-only continuation
- * case and 3 extra sources in the p+q continuation case.
+ * sources
*/
- if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
- (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
+ if (src_cnt > 3 || flags & DMA_PREP_CONTINUE) {
with_ext = 1;
num_descs *= 2;
} else
return 0;
}
-static int ioat3_reset_hw(struct ioat_chan_common *chan)
-{
- /* throw away whatever the channel was doing and get it
- * initialized, with ioat3 specific workarounds
- */
- struct ioatdma_device *device = chan->device;
- struct pci_dev *pdev = device->pdev;
- u32 chanerr;
- u16 dev_id;
- int err;
-
- ioat2_quiesce(chan, msecs_to_jiffies(100));
-
- chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
- writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
-
- /* -= IOAT ver.3 workarounds =- */
- /* Write CHANERRMSK_INT with 3E07h to mask out the errors
- * that can cause stability issues for IOAT ver.3, and clear any
- * pending errors
- */
- pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
- err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
- if (err) {
- dev_err(&pdev->dev, "channel error register unreachable\n");
- return err;
- }
- pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
-
- /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
- * (workaround for spurious config parity error after restart)
- */
- pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
- if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
- pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
-
- return ioat2_reset_sync(chan, msecs_to_jiffies(200));
-}
-
int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
struct pci_dev *pdev = device->pdev;
struct ioat_chan_common *chan;
bool is_raid_device = false;
int err;
+ u16 dev_id;
u32 cap;
device->enumerate_channels = ioat2_enumerate_channels;
- device->reset_hw = ioat3_reset_hw;
device->self_test = ioat3_dma_self_test;
dma = &device->common;
dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
dma->device_prep_dma_xor_val = NULL;
#endif
+ /* -= IOAT ver.3 workarounds =- */
+ /* Write CHANERRMSK_INT with 3E07h to mask out the errors
+ * that can cause stability issues for IOAT ver.3
+ */
+ pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
+
+ /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
+ * (workaround for spurious config parity error after restart)
+ */
+ pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
+ if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
+ pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
+
err = ioat_probe(device);
if (err)
return err;
#define IOAT_PCI_DEVICE_ID_OFFSET 0x02
#define IOAT_PCI_DMAUNCERRSTS_OFFSET 0x148
-#define IOAT_PCI_CHANERR_INT_OFFSET 0x180
#define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184
/* MMIO Device Registers */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);
-static struct msr *msrs;
-
/* Lookup table for all possible MC control instances */
struct amd64_pvt;
static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
return empty;
}
-/* get all cores on this DCT */
-static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
-{
- int cpu;
-
- for_each_online_cpu(cpu)
- if (amd_get_nb_id(cpu) == nid)
- cpumask_set_cpu(cpu, mask);
-}
-
-/* check MCG_CTL on all the cpus on this node */
-static bool amd64_nb_mce_bank_enabled_on_node(int nid)
-{
- cpumask_var_t mask;
- int cpu, nbe;
- bool ret = false;
-
- if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
- amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
- __func__);
- return false;
- }
-
- get_cpus_on_this_dct_cpumask(mask, nid);
-
- rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
-
- for_each_cpu(cpu, mask) {
- struct msr *reg = per_cpu_ptr(msrs, cpu);
- nbe = reg->l & K8_MSR_MCGCTL_NBE;
-
- debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
- cpu, reg->q,
- (nbe ? "enabled" : "disabled"));
-
- if (!nbe)
- goto out;
- }
- ret = true;
-
-out:
- free_cpumask_var(mask);
- return ret;
-}
-
-static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
-{
- cpumask_var_t cmask;
- int cpu;
-
- if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
- amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
- __func__);
- return false;
- }
-
- get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);
-
- rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
-
- for_each_cpu(cpu, cmask) {
-
- struct msr *reg = per_cpu_ptr(msrs, cpu);
-
- if (on) {
- if (reg->l & K8_MSR_MCGCTL_NBE)
- pvt->flags.ecc_report = 1;
-
- reg->l |= K8_MSR_MCGCTL_NBE;
- } else {
- /*
- * Turn off ECC reporting only when it was off before
- */
- if (!pvt->flags.ecc_report)
- reg->l &= ~K8_MSR_MCGCTL_NBE;
- }
- }
- wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
-
- free_cpumask_var(cmask);
-
- return 0;
-}
-
/*
* Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we"
* enable it.
static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
- int err = 0;
- u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+ const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
+ int cpu, idx = 0, err = 0;
+ struct msr msrs[cpumask_weight(cpumask)];
+ u32 value;
+ u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
if (!ecc_enable_override)
return;
+ memset(msrs, 0, sizeof(msrs));
+
amd64_printk(KERN_WARNING,
"'ecc_enable_override' parameter is active, "
"Enabling AMD ECC hardware now: CAUTION\n");
value |= mask;
pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
- if (amd64_toggle_ecc_err_reporting(pvt, ON))
- amd64_printk(KERN_WARNING, "Error enabling ECC reporting over "
- "MCGCTL!\n");
+ rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
+
+ for_each_cpu(cpu, cpumask) {
+ if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
+ set_bit(idx, &pvt->old_mcgctl);
+
+ msrs[idx].l |= K8_MSR_MCGCTL_NBE;
+ idx++;
+ }
+ wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
if (err)
static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
{
- int err = 0;
- u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+ const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
+ int cpu, idx = 0, err = 0;
+ struct msr msrs[cpumask_weight(cpumask)];
+ u32 value;
+ u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
if (!pvt->nbctl_mcgctl_saved)
return;
+ memset(msrs, 0, sizeof(msrs));
+
err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
if (err)
debugf0("Reading K8_NBCTL failed\n");
/* restore the NB Enable MCGCTL bit */
pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
- if (amd64_toggle_ecc_err_reporting(pvt, OFF))
- amd64_printk(KERN_WARNING, "Error restoring ECC reporting over "
- "MCGCTL!\n");
+ rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
+
+ for_each_cpu(cpu, cpumask) {
+ msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
+ msrs[idx].l |=
+ test_bit(idx, &pvt->old_mcgctl) << K8_MSR_MCGCTL_NBE;
+ idx++;
+ }
+
+ wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
+}
+
+/* get all cores on this DCT */
+static void get_cpus_on_this_dct_cpumask(cpumask_t *mask, int nid)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ if (amd_get_nb_id(cpu) == nid)
+ cpumask_set_cpu(cpu, mask);
+}
+
+/* check MCG_CTL on all the cpus on this node */
+static bool amd64_nb_mce_bank_enabled_on_node(int nid)
+{
+ cpumask_t mask;
+ struct msr *msrs;
+ int cpu, nbe, idx = 0;
+ bool ret = false;
+
+ cpumask_clear(&mask);
+
+ get_cpus_on_this_dct_cpumask(&mask, nid);
+
+ msrs = kzalloc(sizeof(struct msr) * cpumask_weight(&mask), GFP_KERNEL);
+ if (!msrs) {
+ amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
+ __func__);
+ return false;
+ }
+
+ rdmsr_on_cpus(&mask, MSR_IA32_MCG_CTL, msrs);
+
+ for_each_cpu(cpu, &mask) {
+ nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
+
+ debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
+ cpu, msrs[idx].q,
+ (nbe ? "enabled" : "disabled"));
+
+ if (!nbe)
+ goto out;
+
+ idx++;
+ }
+ ret = true;
+
+out:
+ kfree(msrs);
+ return ret;
}
/*
* the memory system completely. A command line option allows to force-enable
* hardware ECC later in amd64_enable_ecc_error_reporting().
*/
-static const char *ecc_msg =
- "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
- " Either enable ECC checking or force module loading by setting "
- "'ecc_enable_override'.\n"
- " (Note that use of the override may cause unknown side effects.)\n";
+static const char *ecc_warning =
+ "WARNING: ECC is disabled by BIOS. Module will NOT be loaded.\n"
+ " Either Enable ECC in the BIOS, or set 'ecc_enable_override'.\n"
+ " Also, use of the override can cause unknown side effects.\n";
static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
{
ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
if (!ecc_enabled)
- amd64_printk(KERN_NOTICE, "This node reports that Memory ECC "
+ amd64_printk(KERN_WARNING, "This node reports that Memory ECC "
"is currently disabled, set F3x%x[22] (%s).\n",
K8_NBCFG, pci_name(pvt->misc_f3_ctl));
else
nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id);
if (!nb_mce_en)
- amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR "
+ amd64_printk(KERN_WARNING, "NB MCE bank disabled, set MSR "
"0x%08x[4] on node %d to enable.\n",
MSR_IA32_MCG_CTL, pvt->mc_node_id);
if (!ecc_enabled || !nb_mce_en) {
if (!ecc_enable_override) {
- amd64_printk(KERN_NOTICE, "%s", ecc_msg);
+ amd64_printk(KERN_WARNING, "%s", ecc_warning);
return -ENODEV;
}
+ } else
+ /* CLEAR the override, since BIOS controlled it */
ecc_enable_override = 0;
- }
return 0;
}
pvt->ext_model = boot_cpu_data.x86_model >> 4;
pvt->mc_type_index = mc_type_index;
pvt->ops = family_ops(mc_type_index);
+ pvt->old_mcgctl = 0;
/*
* We have the dram_f2_ctl device as an argument, now go reserve its
amd64_free_mc_sibling_devices(pvt);
+ kfree(pvt);
+ mci->pvt_info = NULL;
+
+ mci_lookup[pvt->mc_node_id] = NULL;
+
/* unregister from EDAC MCE */
amd_report_gart_errors(false);
amd_unregister_ecc_decoder(amd64_decode_bus_error);
/* Free the EDAC CORE resources */
- mci->pvt_info = NULL;
- mci_lookup[pvt->mc_node_id] = NULL;
-
- kfree(pvt);
edac_mc_free(mci);
}
static int __init amd64_edac_init(void)
{
int nb, err = -ENODEV;
- bool load_ok = false;
edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
opstate_init();
if (cache_k8_northbridges() < 0)
- goto err_ret;
-
- msrs = msrs_alloc();
- if (!msrs)
- goto err_ret;
+ return err;
err = pci_register_driver(&amd64_pci_driver);
if (err)
- goto err_pci;
+ return err;
/*
* At this point, the array 'pvt_lookup[]' contains pointers to alloc'd
* amd64_pvt structs. These will be used in the 2nd stage init function
* to finish initialization of the MC instances.
*/
- err = -ENODEV;
for (nb = 0; nb < num_k8_northbridges; nb++) {
if (!pvt_lookup[nb])
continue;
err = amd64_init_2nd_stage(pvt_lookup[nb]);
if (err)
goto err_2nd_stage;
-
- load_ok = true;
}
- if (load_ok) {
- amd64_setup_pci_device();
- return 0;
- }
+ amd64_setup_pci_device();
+
+ return 0;
err_2nd_stage:
+ debugf0("2nd stage failed\n");
pci_unregister_driver(&amd64_pci_driver);
-err_pci:
- msrs_free(msrs);
- msrs = NULL;
-err_ret:
+
return err;
}
edac_pci_release_generic_ctl(amd64_ctl_pci);
pci_unregister_driver(&amd64_pci_driver);
-
- msrs_free(msrs);
- msrs = NULL;
}
module_init(amd64_edac_init);
#define MAX_CS_COUNT 8
#define DRAM_REG_COUNT 8
-#define ON true
-#define OFF false
/*
* PCI-defined configuration space registers
#define K8_NBCAP_DUAL_NODE BIT(1)
#define K8_NBCAP_DCT_DUAL BIT(0)
-/* MSRs */
+/*
+ * MSR Regs
+ */
+#define K8_MSR_MCGCTL 0x017b
#define K8_MSR_MCGCTL_NBE BIT(4)
#define K8_MSR_MC4CTL 0x0410
/* Save old hw registers' values before we modified them */
u32 nbctl_mcgctl_saved; /* When true, following 2 are valid */
u32 old_nbctl;
+ unsigned long old_mcgctl; /* per core on this node */
/* MC Type Index value: socket F vs Family 10h */
u32 mc_type_index;
/* misc settings */
struct flags {
unsigned long cf8_extcfg:1;
- unsigned long ecc_report:1;
} flags;
};
debugf0("\tUncorrected bits= 0x%x\n", ue_errors);
branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
-
- /*
- * According with i5000 datasheet, bit 28 has no significance
- * for errors M4Err-M12Err and M17Err-M21Err, on FERR_NF_FBD
- */
- channel = branch & 2;
-
+ channel = branch;
bank = NREC_BANK(info->nrecmema);
rank = NREC_RANK(info->nrecmema);
rdwr = NREC_RDWR(info->nrecmema);
static LIST_HEAD(descriptor_list);
static int descriptor_count;
-/* ROM header, bus info block, root dir header, capabilities = 7 quadlets */
-static size_t config_rom_length = 1 + 4 + 1 + 1;
-
#define BIB_CRC(v) ((v) << 0)
#define BIB_CRC_LENGTH(v) ((v) << 16)
#define BIB_INFO_LENGTH(v) ((v) << 24)
#define BIB_CMC ((1) << 30)
#define BIB_IMC ((1) << 31)
-static u32 *generate_config_rom(struct fw_card *card)
+static u32 *generate_config_rom(struct fw_card *card, size_t *config_rom_length)
{
struct fw_descriptor *desc;
static u32 config_rom[256];
for (i = 0; i < j; i += length + 1)
length = fw_compute_block_crc(config_rom + i);
- WARN_ON(j != config_rom_length);
+ *config_rom_length = j;
return config_rom;
}
{
struct fw_card *card;
u32 *config_rom;
+ size_t length;
list_for_each_entry (card, &card_list, link) {
- config_rom = generate_config_rom(card);
- card->driver->set_config_rom(card, config_rom,
- config_rom_length);
+ config_rom = generate_config_rom(card, &length);
+ card->driver->set_config_rom(card, config_rom, length);
}
}
-static size_t required_space(struct fw_descriptor *desc)
-{
- /* descriptor + entry into root dir + optional immediate entry */
- return desc->length + 1 + (desc->immediate > 0 ? 1 : 0);
-}
-
int fw_core_add_descriptor(struct fw_descriptor *desc)
{
size_t i;
- int ret;
/*
* Check descriptor is valid; the length of all blocks in the
mutex_lock(&card_mutex);
- if (config_rom_length + required_space(desc) > 256) {
- ret = -EBUSY;
- } else {
- list_add_tail(&desc->link, &descriptor_list);
- config_rom_length += required_space(desc);
+ list_add_tail(&desc->link, &descriptor_list);
+ descriptor_count++;
+ if (desc->immediate > 0)
descriptor_count++;
- if (desc->immediate > 0)
- descriptor_count++;
- update_config_roms();
- ret = 0;
- }
+ update_config_roms();
mutex_unlock(&card_mutex);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(fw_core_add_descriptor);
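
required_space() counts quadlets: the descriptor body itself, one entry in the root directory pointing at it, and one more entry when the descriptor carries an immediate value. For a 5-quadlet descriptor with an immediate entry (illustrative):

	required_space(desc) == 5 + 1 + 1 == 7 quadlets

which is what the 256-quadlet budget check above guards against.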
mutex_lock(&card_mutex);
list_del(&desc->link);
- config_rom_length -= required_space(desc);
descriptor_count--;
if (desc->immediate > 0)
descriptor_count--;
u32 max_receive, u32 link_speed, u64 guid)
{
u32 *config_rom;
+ size_t length;
int ret;
card->max_receive = max_receive;
mutex_lock(&card_mutex);
- config_rom = generate_config_rom(card);
- ret = card->driver->enable(card, config_rom, config_rom_length);
+ config_rom = generate_config_rom(card, &length);
+ ret = card->driver->enable(card, config_rom, length);
if (ret == 0)
list_add_tail(&card->link, &card_list);
page = payload >> PAGE_SHIFT;
offset = payload & ~PAGE_MASK;
rest = p->payload_length;
- /*
- * The controllers I've tested have not worked correctly when
- * second_req_count is zero. Rather than do something we know won't
- * work, return an error
- */
- if (rest == 0)
- return -EINVAL;
/* FIXME: make packet-per-buffer/dual-buffer a context option */
while (rest > 0) {
unsigned long payload)
{
struct iso_context *ctx = container_of(base, struct iso_context, base);
- struct descriptor *d, *pd;
+ struct descriptor *d = NULL, *pd = NULL;
struct fw_iso_packet *p = packet;
dma_addr_t d_bus, page_bus;
u32 z, header_z, rest;
d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));
rest = payload_per_buffer;
- pd = d;
for (j = 1; j < z; j++) {
- pd++;
+ pd = d + j;
pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
DESCRIPTOR_INPUT_MORE);
#define PCI_VENDOR_ID_AGERE PCI_VENDOR_ID_ATT
#define PCI_DEVICE_ID_AGERE_FW643 0x5901
-#define PCI_DEVICE_ID_TI_TSB43AB23 0x8024
static int __devinit pci_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
#if !defined(CONFIG_X86_32)
/* dual-buffer mode is broken with descriptor addresses above 2G */
if (dev->vendor == PCI_VENDOR_ID_TI &&
- (dev->device == PCI_DEVICE_ID_TI_TSB43AB22 ||
- dev->device == PCI_DEVICE_ID_TI_TSB43AB23))
+ dev->device == PCI_DEVICE_ID_TI_TSB43AB22)
ohci->use_dualbuffer = false;
#endif
for (i = 0; i < ARRAY_SIZE(dmi->matches); i++) {
int s = dmi->matches[i].slot;
if (s == DMI_NONE)
- break;
+ continue;
if (dmi_ident[s]
&& strstr(dmi_ident[s], dmi->matches[i].substr))
continue;
return true;
}
-/**
- * dmi_is_end_of_table - check for end-of-table marker
- * @dmi: pointer to the dmi_system_id structure to check
- */
-static bool dmi_is_end_of_table(const struct dmi_system_id *dmi)
-{
- return dmi->matches[0].slot == DMI_NONE;
-}
-
/**
* dmi_check_system - check system DMI data
* @list: array of dmi_system_id structures to match against
int count = 0;
const struct dmi_system_id *d;
- for (d = list; !dmi_is_end_of_table(d); d++)
+ for (d = list; d->ident; d++)
if (dmi_matches(d)) {
count++;
if (d->callback && d->callback(d))
{
const struct dmi_system_id *d;
- for (d = list; !dmi_is_end_of_table(d); d++)
+ for (d = list; d->ident; d++)
if (dmi_matches(d))
return d;
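
Both loop conditions stop at the all-zero sentinel that terminates every dmi_system_id table; the matches[0].slot == DMI_NONE form additionally tolerates entries whose ->ident is left NULL. A typical table (hypothetical entry):

	static const struct dmi_system_id example_table[] = {
		{
			.ident = "Example Board",
			.matches = {
				DMI_MATCH(DMI_BOARD_NAME, "EXAMPLE"),
			},
		},
		{ }	/* all-zero terminator ends the scan */
	};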
struct drm_ati_pcigart_info *gart_info)
{
gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
- PAGE_SIZE);
+ PAGE_SIZE,
+ gart_info->table_mask);
if (gart_info->table_handle == NULL)
return -ENOMEM;
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
- if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
- DRM_ERROR("fail to set dma mask to 0x%Lx\n",
- gart_info->table_mask);
- ret = 1;
- goto done;
- }
-
ret = drm_ati_alloc_pcigart_table(dev, gart_info);
if (ret) {
DRM_ERROR("cannot allocate PCI GART page!\n");
* As we're limiting the address to 2^32-1 (or less),
* casting it down to 32 bits is no problem, but we
* need to point to a 64bit variable first. */
- dmah = drm_pci_alloc(dev, map->size, map->size);
+ dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
if (!dmah) {
kfree(map);
return -ENOMEM;
while (entry->buf_count < count) {
- dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
+ dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
if (!dmah) {
/* Set count correctly so we free the proper amount. */
{
int count = 0;
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
-
drm_fb_helper_parse_command_line(dev);
count = drm_helper_probe_connector_modes(dev,
if (IS_ERR(obj->filp))
goto free;
+ /* Basically we want to disable the OOM killer and handle ENOMEM
+ * ourselves by sacrificing pages from cached buffers.
+ * XXX shmem_file_[gs]et_gfp_mask()
+ */
+ mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
+ GFP_HIGHUSER |
+ __GFP_COLD |
+ __GFP_FS |
+ __GFP_RECLAIMABLE |
+ __GFP_NORETRY |
+ __GFP_NOWARN |
+ __GFP_NOMEMALLOC);
+
kref_init(&obj->refcount);
kref_init(&obj->handlecount);
obj->size = size;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
- if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
- if (!dev->vblank_enabled[crtc]) {
- ret = dev->driver->enable_vblank(dev, crtc);
- DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
- if (ret)
- atomic_dec(&dev->vblank_refcount[crtc]);
- else {
- dev->vblank_enabled[crtc] = 1;
- drm_update_vblank_count(dev, crtc);
- }
- }
- } else {
- if (!dev->vblank_enabled[crtc]) {
+ if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
+ !dev->vblank_enabled[crtc]) {
+ ret = dev->driver->enable_vblank(dev, crtc);
+ DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+ if (ret)
atomic_dec(&dev->vblank_refcount[crtc]);
- ret = -EINVAL;
+ else {
+ dev->vblank_enabled[crtc] = 1;
+ drm_update_vblank_count(dev, crtc);
}
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
EXPORT_SYMBOL(drm_vblank_put);
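
drm_vblank_get() above uses the standard enable-on-first-user refcount pattern: only the 0 -> 1 transition programs the hardware, and a failed enable immediately drops the reference. In miniature (names hypothetical):

	if (atomic_add_return(1, &refcount) == 1) {
		if (enable_hw() != 0)
			atomic_dec(&refcount);	/* roll back on failure */
	}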
-void drm_vblank_off(struct drm_device *dev, int crtc)
-{
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- DRM_WAKEUP(&dev->vbl_queue[crtc]);
- dev->vblank_enabled[crtc] = 0;
- dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-}
-EXPORT_SYMBOL(drm_vblank_off);
-
/**
* drm_vblank_pre_modeset - account for vblanks across mode sets
* @dev: DRM device
/**
* \brief Allocate a PCI consistent memory block, for DMA.
*/
-drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
+drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align,
+ dma_addr_t maxaddr)
{
drm_dma_handle_t *dmah;
#if 1
if (align > size)
return NULL;
+ if (pci_set_dma_mask(dev->pdev, maxaddr) != 0) {
+ DRM_ERROR("Setting pci dma mask failed\n");
+ return NULL;
+ }
+
dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
if (!dmah)
return NULL;
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
obj = obj_priv->obj;
if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
- ret = i915_gem_object_get_pages(obj, 0);
+ ret = i915_gem_object_get_pages(obj);
if (ret) {
DRM_ERROR("Failed to get pages: %d\n", ret);
spin_unlock(&dev_priv->mm.active_list_lock);
return 0;
}
+static int i915_registers_info(struct seq_file *m, void *data) {
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t reg;
+
+#define DUMP_RANGE(start, end) \
+ for (reg=start; reg < end; reg += 4) \
+ seq_printf(m, "%08x\t%08x\n", reg, I915_READ(reg));
+
+ DUMP_RANGE(0x00000, 0x00fff); /* VGA registers */
+ DUMP_RANGE(0x02000, 0x02fff); /* instruction, memory, interrupt control registers */
+ DUMP_RANGE(0x03000, 0x031ff); /* FENCE and PPGTT control registers */
+ DUMP_RANGE(0x03200, 0x03fff); /* frame buffer compression registers */
+ DUMP_RANGE(0x05000, 0x05fff); /* I/O control registers */
+ DUMP_RANGE(0x06000, 0x06fff); /* clock control registers */
+ DUMP_RANGE(0x07000, 0x07fff); /* 3D internal debug registers */
+ DUMP_RANGE(0x07400, 0x088ff); /* GPE debug registers */
+ DUMP_RANGE(0x0a000, 0x0afff); /* display palette registers */
+ DUMP_RANGE(0x10000, 0x13fff); /* MMIO MCHBAR */
+ DUMP_RANGE(0x30000, 0x3ffff); /* overlay registers */
+ DUMP_RANGE(0x60000, 0x6ffff); /* display engine pipeline registers */
+ DUMP_RANGE(0x70000, 0x72fff); /* display and cursor registers */
+ DUMP_RANGE(0x73000, 0x73fff); /* performance counters */
+
+ return 0;
+}
+
+
static struct drm_info_list i915_debugfs_list[] = {
+ {"i915_regs", i915_registers_info, 0},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
drm_i915_private_t *dev_priv = dev->dev_private;
/* Program Hardware Status Page */
dev_priv->status_page_dmah =
- drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
+ drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
if (!dev_priv->status_page_dmah) {
DRM_ERROR("Can not allocate hardware status page\n");
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *compressed_fb, *compressed_llb;
- unsigned long cfb_base;
- unsigned long ll_base = 0;
+ unsigned long cfb_base, ll_base;
/* Leave 1M for line length buffer & misc. */
compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
if (ret)
goto destroy_ringbuffer;
- intel_modeset_init(dev);
-
ret = drm_irq_install(dev);
if (ret)
goto destroy_ringbuffer;
I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
+ intel_modeset_init(dev);
+
drm_helper_initial_config(dev);
return 0;
*/
struct list_head flushing_list;
- /**
- * List of objects currently pending a GPU write flush.
- *
- * All elements on this list will belong to either the
- * active_list or flushing_list, last_rendering_seqno can
- * be used to differentiate between the two elements.
- */
- struct list_head gpu_write_list;
-
/**
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
struct timer_list idle_timer;
bool busy;
u16 orig_clock;
- struct drm_connector *int_lvds_connector;
} drm_i915_private_t;
/** driver private structure attached to each drm_gem_object */
/** This object's place on the active/flushing/inactive lists */
struct list_head list;
- /** This object's place on GPU write list */
- struct list_head gpu_write_list;
/** This object's place on the fenced object LRU */
struct list_head fence_list;
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end);
int i915_gem_idle(struct drm_device *dev);
-int i915_lp_ring_sync(struct drm_device *dev);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
int write);
-int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
struct drm_gem_object *obj, int id);
void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
-int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+int i915_gem_object_get_pages(struct drm_gem_object *obj);
void i915_gem_object_put_pages(struct drm_gem_object *obj);
void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
#define IS_I855(dev) ((dev)->pci_device == 0x3582)
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
-#define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev))
#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
*/
#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
IS_I915GM(dev)))
-#define SUPPORTS_DIGITAL_OUTPUTS(dev) (IS_I9XX(dev) && !IS_IGD(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev))
#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev))
#define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev))
-#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
- !IS_IGDNG(dev) && !IS_IGD(dev))
#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev))
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev))
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_get_pages(obj, 0);
+ ret = i915_gem_object_get_pages(obj);
if (ret != 0)
goto fail_unlock;
return ret;
}
+static inline gfp_t
+i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
+{
+ return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
+}
+
+static inline void
+i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
+{
+ mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
+}
+
static int
i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
{
int ret;
- ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
+ ret = i915_gem_object_get_pages(obj);
/* If we've insufficient memory to map in the pages, attempt
* to make some space by throwing out some old buffers.
*/
if (ret == -ENOMEM) {
struct drm_device *dev = obj->dev;
+ gfp_t gfp;
ret = i915_gem_evict_something(dev, obj->size);
if (ret)
return ret;
- ret = i915_gem_object_get_pages(obj, 0);
+ gfp = i915_gem_object_get_page_gfp_mask(obj);
+ i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
+ ret = i915_gem_object_get_pages(obj);
+ i915_gem_object_set_page_gfp_mask (obj, gfp);
}
return ret;
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_get_pages(obj, 0);
+ ret = i915_gem_object_get_pages(obj);
if (ret != 0)
goto fail_unlock;
list->hash.key = list->file_offset_node->start;
if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
DRM_ERROR("failed to add to map hash\n");
- ret = -ENOMEM;
goto out_free_mm;
}
else
list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
- BUG_ON(!list_empty(&obj_priv->gpu_write_list));
-
obj_priv->last_rendering_seqno = 0;
if (obj_priv->active) {
obj_priv->active = 0;
struct drm_i915_gem_object *obj_priv, *next;
list_for_each_entry_safe(obj_priv, next,
- &dev_priv->mm.gpu_write_list,
- gpu_write_list) {
+ &dev_priv->mm.flushing_list, list) {
struct drm_gem_object *obj = obj_priv->obj;
if ((obj->write_domain & flush_domains) ==
uint32_t old_write_domain = obj->write_domain;
obj->write_domain = 0;
- list_del_init(&obj_priv->gpu_write_list);
i915_gem_object_move_to_active(obj, seqno);
trace_i915_gem_object_change_domain(obj,
mutex_unlock(&dev->struct_mutex);
}
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
static int
-i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
+i915_wait_request(struct drm_device *dev, uint32_t seqno)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 ier;
dev_priv->mm.waiting_gem_seqno = seqno;
i915_user_irq_get(dev);
- if (interruptible)
- ret = wait_event_interruptible(dev_priv->irq_queue,
- i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
- atomic_read(&dev_priv->mm.wedged));
- else
- wait_event(dev_priv->irq_queue,
- i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
- atomic_read(&dev_priv->mm.wedged));
-
+ ret = wait_event_interruptible(dev_priv->irq_queue,
+ i915_seqno_passed(i915_get_gem_seqno(dev),
+ seqno) ||
+ atomic_read(&dev_priv->mm.wedged));
i915_user_irq_put(dev);
dev_priv->mm.waiting_gem_seqno = 0;
return ret;
}
-/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno)
-{
- return i915_do_wait_request(dev, seqno, 1);
-}
-
-/**
- * Waits for the ring to finish up to the latest request. Useful for waiting
- * for flip events, e.g for the overlay support. */
-int i915_lp_ring_sync(struct drm_device *dev)
-{
- uint32_t seqno;
- int ret;
-
- seqno = i915_add_request(dev, NULL, 0);
-
- if (seqno == 0)
- return -ENOMEM;
-
- ret = i915_do_wait_request(dev, seqno, 0);
- BUG_ON(ret == -ERESTARTSYS);
- return ret;
-}
-
static void
i915_gem_flush(struct drm_device *dev,
uint32_t invalidate_domains,
#endif
BEGIN_LP_RING(2);
OUT_RING(cmd);
- OUT_RING(MI_NOOP);
+ OUT_RING(0); /* noop */
ADVANCE_LP_RING();
}
}
/* blow away mappings if mapped through GTT */
i915_gem_release_mmap(obj);
+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+ i915_gem_clear_fence_reg(obj);
+
/* Move the object to the CPU domain to ensure that
* any possible CPU writes while it's not in the GTT
* are flushed when we go to remap it. This will
BUG_ON(obj_priv->active);
- /* release the fence reg _after_ flushing */
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- i915_gem_clear_fence_reg(obj);
-
if (obj_priv->agp_mem != NULL) {
drm_unbind_agp(obj_priv->agp_mem);
drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
uint32_t seqno;
+ int ret;
bool lists_empty;
spin_lock(&dev_priv->mm.active_list_lock);
if (ret)
return ret;
- BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-
ret = i915_gem_evict_from_inactive_list(dev);
if (ret)
return ret;
}
int
-i915_gem_object_get_pages(struct drm_gem_object *obj,
- gfp_t gfpmask)
+i915_gem_object_get_pages(struct drm_gem_object *obj)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int page_count, i;
inode = obj->filp->f_path.dentry->d_inode;
mapping = inode->i_mapping;
for (i = 0; i < page_count; i++) {
- page = read_cache_page_gfp(mapping, i,
- mapping_gfp_mask (mapping) |
- __GFP_COLD |
- gfpmask);
+ page = read_mapping_page(mapping, i, NULL);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
i915_gem_object_put_pages(obj);
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
struct drm_mm_node *free_space;
- gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
+ bool retry_alloc = false;
int ret;
+ if (dev_priv->mm.suspended)
+ return -EBUSY;
+
if (obj_priv->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to bind a purgeable object\n");
return -EINVAL;
DRM_INFO("Binding object of size %zd at 0x%08x\n",
obj->size, obj_priv->gtt_offset);
#endif
- ret = i915_gem_object_get_pages(obj, gfpmask);
+ if (retry_alloc) {
+ i915_gem_object_set_page_gfp_mask (obj,
+ i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
+ }
+ ret = i915_gem_object_get_pages(obj);
+ if (retry_alloc) {
+ i915_gem_object_set_page_gfp_mask (obj,
+ i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
+ }
if (ret) {
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
ret = i915_gem_evict_something(dev, obj->size);
if (ret) {
/* now try to shrink everyone else */
- if (gfpmask) {
- gfpmask = 0;
- goto search_free;
+ if (! retry_alloc) {
+ retry_alloc = true;
+ goto search_free;
}
return ret;
old_write_domain = obj->write_domain;
i915_gem_flush(dev, 0, obj->write_domain);
seqno = i915_add_request(dev, NULL, obj->write_domain);
- BUG_ON(obj->write_domain);
+ obj->write_domain = 0;
i915_gem_object_move_to_active(obj, seqno);
trace_i915_gem_object_change_domain(obj,
return 0;
}
-/*
- * Prepare buffer for display plane. Use uninterruptible for possible flush
- * wait, as in modesetting process we're not supposed to be interrupted.
- */
-int
-i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- uint32_t old_write_domain, old_read_domains;
- int ret;
-
- /* Not valid to be called on unbound objects. */
- if (obj_priv->gtt_space == NULL)
- return -EINVAL;
-
- i915_gem_object_flush_gpu_write_domain(obj);
-
- /* Wait on any GPU rendering and flushing to occur. */
- if (obj_priv->active) {
-#if WATCH_BUF
- DRM_INFO("%s: object %p wait for seqno %08x\n",
- __func__, obj, obj_priv->last_rendering_seqno);
-#endif
- ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
- if (ret != 0)
- return ret;
- }
-
- old_write_domain = obj->write_domain;
- old_read_domains = obj->read_domains;
-
- obj->read_domains &= I915_GEM_DOMAIN_GTT;
-
- i915_gem_object_flush_cpu_write_domain(obj);
-
- /* It should now be out of any other write domains, and we can update
- * the domain values for our changes.
- */
- BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
- obj->read_domains |= I915_GEM_DOMAIN_GTT;
- obj->write_domain = I915_GEM_DOMAIN_GTT;
- obj_priv->dirty = 1;
-
- trace_i915_gem_object_change_domain(obj,
- old_read_domains,
- old_write_domain);
-
- return 0;
-}
-
/**
* Moves a single object to the CPU read, and possibly write domain.
*
i915_gem_flush(dev,
dev->invalidate_domains,
dev->flush_domains);
- if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
+ if (dev->flush_domains)
(void)i915_add_request(dev, file_priv,
dev->flush_domains);
}
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
uint32_t old_write_domain = obj->write_domain;
obj->write_domain = obj->pending_write_domain;
- if (obj->write_domain)
- list_move_tail(&obj_priv->gpu_write_list,
- &dev_priv->mm.gpu_write_list);
- else
- list_del_init(&obj_priv->gpu_write_list);
-
trace_i915_gem_object_change_domain(obj,
obj->read_domains,
old_write_domain);
obj_priv->obj = obj;
obj_priv->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj_priv->list);
- INIT_LIST_HEAD(&obj_priv->gpu_write_list);
INIT_LIST_HEAD(&obj_priv->fence_list);
obj_priv->madv = I915_MADV_WILLNEED;
spin_lock_init(&dev_priv->mm.active_list_lock);
INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
- INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
INIT_LIST_HEAD(&dev_priv->mm.request_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
phys_obj->id = id;
- phys_obj->handle = drm_pci_alloc(dev, size, 0);
+ phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
if (!phys_obj->handle) {
ret = -ENOMEM;
goto kfree_obj;
if (!obj_priv->phys_obj)
return;
- ret = i915_gem_object_get_pages(obj, 0);
+ ret = i915_gem_object_get_pages(obj);
if (ret)
goto out;
obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
obj_priv->phys_obj->cur_obj = obj;
- ret = i915_gem_object_get_pages(obj, 0);
+ ret = i915_gem_object_get_pages(obj);
if (ret) {
DRM_ERROR("failed to get page list\n");
goto out;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier;
+ u32 new_de_iir, new_gt_iir;
struct drm_i915_master_private *master_priv;
/* disable master interrupt before clearing iir */
de_iir = I915_READ(DEIIR);
gt_iir = I915_READ(GTIIR);
- if (de_iir == 0 && gt_iir == 0)
- goto done;
+ for (;;) {
+ if (de_iir == 0 && gt_iir == 0)
+ break;
- ret = IRQ_HANDLED;
+ ret = IRQ_HANDLED;
- if (dev->primary->master) {
- master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
- }
+ I915_WRITE(DEIIR, de_iir);
+ new_de_iir = I915_READ(DEIIR);
+ I915_WRITE(GTIIR, gt_iir);
+ new_gt_iir = I915_READ(GTIIR);
- if (gt_iir & GT_USER_INTERRUPT) {
- u32 seqno = i915_get_gem_seqno(dev);
- dev_priv->mm.irq_gem_seqno = seqno;
- trace_i915_gem_request_complete(dev, seqno);
- DRM_WAKEUP(&dev_priv->irq_queue);
- dev_priv->hangcheck_count = 0;
- mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
- }
+ if (dev->primary->master) {
+ master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_dispatch =
+ READ_BREADCRUMB(dev_priv);
+ }
+
+ if (gt_iir & GT_USER_INTERRUPT) {
+ u32 seqno = i915_get_gem_seqno(dev);
+ dev_priv->mm.irq_gem_seqno = seqno;
+ trace_i915_gem_request_complete(dev, seqno);
+ DRM_WAKEUP(&dev_priv->irq_queue);
+ }
- I915_WRITE(GTIIR, gt_iir);
- I915_WRITE(DEIIR, de_iir);
+ de_iir = new_de_iir;
+ gt_iir = new_gt_iir;
+ }
-done:
I915_WRITE(DEIER, de_ier);
(void)I915_READ(DEIER);
(void) I915_READ(IER);
}
-/*
- * Must be called after intel_modeset_init or hotplug interrupts won't be
- * enabled correctly.
- */
int i915_driver_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
if (I915_HAS_HOTPLUG(dev)) {
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
- /* Note HDMI and DP share bits */
- if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIB_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMID_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
- hotplug_en |= SDVOC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
- hotplug_en |= SDVOB_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS)
- hotplug_en |= CRT_HOTPLUG_INT_EN;
- /* Ignore TV since it's buggy */
-
+ /* Leave other bits alone */
+ hotplug_en |= HOTPLUG_EN_MASK;
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+ dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS |
+ TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS |
+ SDVOB_HOTPLUG_INT_STATUS;
+ if (IS_G4X(dev)) {
+ dev_priv->hotplug_supported_mask |=
+ HDMIB_HOTPLUG_INT_STATUS |
+ HDMIC_HOTPLUG_INT_STATUS |
+ HDMID_HOTPLUG_INT_STATUS;
+ }
/* Enable in IER... */
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
/* and unmask in IMR */
#define FBC_CTL_PERIODIC (1<<30)
#define FBC_CTL_INTERVAL_SHIFT (16)
#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
-#define FBC_C3_IDLE (1<<13)
#define FBC_CTL_STRIDE_SHIFT (5)
#define FBC_CTL_FENCENO (1<<0)
#define FBC_COMMAND 0x0320c
# define GPIO_DATA_VAL_IN (1 << 12)
# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
-#define GMBUS0 0x5100
-#define GMBUS1 0x5104
-#define GMBUS2 0x5108
-#define GMBUS3 0x510c
-#define GMBUS4 0x5110
-#define GMBUS5 0x5120
-
/*
* Clock control & power management
*/
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
#define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f
+#define HOTPLUG_EN_MASK (HDMIB_HOTPLUG_INT_EN | \
+ HDMIC_HOTPLUG_INT_EN | \
+ HDMID_HOTPLUG_INT_EN | \
+ SDVOB_HOTPLUG_INT_EN | \
+ SDVOC_HOTPLUG_INT_EN | \
+ TV_HOTPLUG_INT_EN | \
+ CRT_HOTPLUG_INT_EN)
+
#define PORT_HOTPLUG_STAT 0x61114
#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
#define LVDS_PORT_EN (1 << 31)
/* Selects pipe B for LVDS data. Must be set on pre-965. */
#define LVDS_PIPEB_SELECT (1 << 30)
-/* LVDS dithering flag on 965/g4x platform */
-#define LVDS_ENABLE_DITHER (1 << 25)
/* Enable border for unscaled (or aspect-scaled) display */
#define LVDS_BORDER_ENABLE (1 << 15)
/*
/* Display & cursor control */
-/* dithering flag on Ironlake */
-#define PIPE_ENABLE_DITHER (1 << 4)
/* Pipe A */
#define PIPEADSL 0x70000
#define PIPEACONF 0x70008
#define PCH_GPIOE 0xc5020
#define PCH_GPIOF 0xc5024
-#define PCH_GMBUS0 0xc5100
-#define PCH_GMBUS1 0xc5104
-#define PCH_GMBUS2 0xc5108
-#define PCH_GMBUS3 0xc510c
-#define PCH_GMBUS4 0xc5110
-#define PCH_GMBUS5 0xc5120
-
#define PCH_DPLL_A 0xc6014
#define PCH_DPLL_B 0xc6018
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
-#include "intel_drv.h"
+#include "i915_drv.h"
static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
{
for (i = 0; i < 3; i++)
I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
- /* I2C state */
- intel_i2c_reset_gmbus(dev);
-
return 0;
}
adpa = I915_READ(PCH_ADPA);
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
- /* disable HPD first */
- I915_WRITE(PCH_ADPA, adpa);
- (void)I915_READ(PCH_ADPA);
adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
ADPA_CRT_HOTPLUG_WARMUP_10MS |
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);
-
- dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
}
/* enable it... */
fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
- if (IS_I945GM(dev))
- fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
if (obj_priv->tiling_mode != I915_TILING_NONE)
return ret;
}
- ret = i915_gem_object_set_to_display_plane(obj);
+ ret = i915_gem_object_set_to_gtt_domain(obj, 1);
if (ret != 0) {
i915_gem_object_unpin(obj);
mutex_unlock(&dev->struct_mutex);
int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
u32 temp;
int tries = 5, j, n;
- u32 pipe_bpc;
-
- temp = I915_READ(pipeconf_reg);
- pipe_bpc = temp & PIPE_BPC_MASK;
/* XXX: When our outputs are all unaware of DPMS modes other than off
* and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
DRM_DEBUG("crtc %d dpms on\n", pipe);
-
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- temp = I915_READ(PCH_LVDS);
- if ((temp & LVDS_PORT_EN) == 0) {
- I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
- POSTING_READ(PCH_LVDS);
- }
- }
-
if (HAS_eDP) {
/* enable eDP PLL */
igdng_enable_pll_edp(crtc);
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
temp = I915_READ(fdi_rx_reg);
- /*
- * make the BPC in FDI Rx be consistent with that in
- * pipeconf reg.
- */
- temp &= ~(0x7 << 16);
- temp |= (pipe_bpc << 11);
I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
FDI_SEL_PCDCLK |
FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
/* enable PCH transcoder */
temp = I915_READ(transconf_reg);
- /*
- * make the BPC in transcoder be consistent with
- * that in pipeconf reg.
- */
- temp &= ~PIPE_BPC_MASK;
- temp |= pipe_bpc;
I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
I915_READ(transconf_reg);
case DRM_MODE_DPMS_OFF:
DRM_DEBUG("crtc %d dpms off\n", pipe);
+ i915_disable_vga(dev);
+
/* Disable display plane */
temp = I915_READ(dspcntr_reg);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
I915_READ(dspbase_reg);
}
- i915_disable_vga(dev);
-
/* disable cpu pipe, disable after all planes disabled */
temp = I915_READ(pipeconf_reg);
if ((temp & PIPEACONF_ENABLE) != 0) {
} else
DRM_DEBUG("crtc %d is disabled\n", pipe);
- udelay(100);
-
- /* Disable PF */
- temp = I915_READ(pf_ctl_reg);
- if ((temp & PF_ENABLE) != 0) {
- I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
- I915_READ(pf_ctl_reg);
+ if (HAS_eDP) {
+ igdng_disable_pll_edp(crtc);
}
- I915_WRITE(pf_win_size, 0);
/* disable CPU FDI tx and PCH FDI rx */
temp = I915_READ(fdi_tx_reg);
I915_READ(fdi_tx_reg);
temp = I915_READ(fdi_rx_reg);
- /* BPC in FDI rx is consistent with that in pipeconf */
- temp &= ~(0x07 << 16);
- temp |= (pipe_bpc << 11);
I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
I915_READ(fdi_rx_reg);
udelay(100);
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- temp = I915_READ(PCH_LVDS);
- I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN);
- I915_READ(PCH_LVDS);
- udelay(100);
- }
-
/* disable PCH transcoder */
temp = I915_READ(transconf_reg);
if ((temp & TRANS_ENABLE) != 0) {
}
}
}
- temp = I915_READ(transconf_reg);
- /* BPC in transcoder is consistent with that in pipeconf */
- temp &= ~PIPE_BPC_MASK;
- temp |= pipe_bpc;
- I915_WRITE(transconf_reg, temp);
- I915_READ(transconf_reg);
- udelay(100);
/* disable PCH DPLL */
temp = I915_READ(pch_dpll_reg);
I915_READ(pch_dpll_reg);
}
- if (HAS_eDP) {
- igdng_disable_pll_edp(crtc);
- }
-
temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_SEL_PCDCLK;
- I915_WRITE(fdi_rx_reg, temp);
- I915_READ(fdi_rx_reg);
-
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_RX_PLL_ENABLE;
- I915_WRITE(fdi_rx_reg, temp);
- I915_READ(fdi_rx_reg);
+ if ((temp & FDI_RX_PLL_ENABLE) != 0) {
+ temp &= ~FDI_SEL_PCDCLK;
+ temp &= ~FDI_RX_PLL_ENABLE;
+ I915_WRITE(fdi_rx_reg, temp);
+ I915_READ(fdi_rx_reg);
+ }
/* Disable CPU FDI TX PLL */
temp = I915_READ(fdi_tx_reg);
udelay(100);
}
+ /* Disable PF */
+ temp = I915_READ(pf_ctl_reg);
+ if ((temp & PF_ENABLE) != 0) {
+ I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
+ I915_READ(pf_ctl_reg);
+ }
+ I915_WRITE(pf_win_size, 0);
+
/* Wait for the clocks to turn off. */
- udelay(100);
+ udelay(150);
break;
}
}
intel_update_watermarks(dev);
/* Give the overlay scaler a chance to disable if it's on this pipe */
//intel_crtc_dpms_video(crtc, FALSE); TODO
- drm_vblank_off(dev, pipe);
if (dev_priv->cfb_plane == plane &&
dev_priv->display.disable_fbc)
sr_entries = roundup(sr_entries / cacheline_size, 1);
DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
- } else {
- /* Turn off self refresh if both pipes are enabled */
- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
- & ~FW_BLC_SELF_EN);
}
DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
-static void i965_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int pixel_size)
+static void i965_update_wm(struct drm_device *dev, int unused, int unused2,
+ int unused3, int unused4)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long line_time_us;
- int sr_clock, sr_entries, srwm = 1;
-
- /* Calc sr entries for one plane configs */
- if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
- /* self-refresh has much higher latency */
- const static int sr_latency_ns = 12000;
-
- sr_clock = planea_clock ? planea_clock : planeb_clock;
- line_time_us = ((sr_hdisplay * 1000) / sr_clock);
-
- /* Use ns/us then divide to preserve precision */
- sr_entries = (((sr_latency_ns / line_time_us) + 1) *
- pixel_size * sr_hdisplay) / 1000;
- sr_entries = roundup(sr_entries / I915_FIFO_LINE_SIZE, 1);
- DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
- srwm = I945_FIFO_SIZE - sr_entries;
- if (srwm < 0)
- srwm = 1;
- srwm &= 0x3f;
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
- } else {
- /* Turn off self refresh if both pipes are enabled */
- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
- & ~FW_BLC_SELF_EN);
- }
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
- srwm);
+ DRM_DEBUG("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR 8\n");
/* 965 has limitations... */
- I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) |
- (8 << 0));
+ I915_WRITE(DSPFW1, (8 << 16) | (8 << 8) | (8 << 0));
I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
}
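/* Editor's sketch (restating the removed code above, illustrative only):
 * the deleted self-refresh path sized the watermark as
 *
 *   line_time_us = (sr_hdisplay * 1000) / sr_clock;
 *   sr_entries   = ((sr_latency_ns / line_time_us) + 1)
 *                  * pixel_size * sr_hdisplay / 1000;
 *   sr_entries   = roundup(sr_entries / I915_FIFO_LINE_SIZE, 1);
 *   srwm         = I945_FIFO_SIZE - sr_entries;
 *   if (srwm < 0)
 *           srwm = 1;
 *   srwm &= 0x3f;
 *
 * i.e. enough FIFO lines to ride out the self-refresh exit latency,
 * with a floor of 1 and a 6-bit register field. */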
if (srwm < 0)
srwm = 1;
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
- } else {
- /* Turn off self refresh if both pipes are enabled */
- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
- & ~FW_BLC_SELF_EN);
}
DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
/* determine panel color depth */
temp = I915_READ(pipeconf_reg);
- temp &= ~PIPE_BPC_MASK;
- if (is_lvds) {
- int lvds_reg = I915_READ(PCH_LVDS);
- /* the BPC will be 6 if it is 18-bit LVDS panel */
- if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
- temp |= PIPE_8BPC;
- else
- temp |= PIPE_6BPC;
- } else
- temp |= PIPE_8BPC;
- I915_WRITE(pipeconf_reg, temp);
- I915_READ(pipeconf_reg);
switch (temp & PIPE_BPC_MASK) {
case PIPE_8BPC:
* appropriately here, but we need to look more thoroughly into how
* panels behave in the two modes.
*/
- /* set the dithering flag */
- if (IS_I965G(dev)) {
- if (dev_priv->lvds_dither) {
- if (IS_IGDNG(dev))
- pipeconf |= PIPE_ENABLE_DITHER;
- else
- lvds |= LVDS_ENABLE_DITHER;
- } else {
- if (IS_IGDNG(dev))
- pipeconf &= ~PIPE_ENABLE_DITHER;
- else
- lvds &= ~LVDS_ENABLE_DITHER;
- }
- }
+
I915_WRITE(lvds_reg, lvds);
I915_READ(lvds_reg);
}
queue_work(dev_priv->wq, &dev_priv->idle_work);
}
+void intel_increase_renderclock(struct drm_device *dev, bool schedule)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (IS_IGDNG(dev))
+ return;
+
+ if (!dev_priv->render_reclock_avail) {
+ DRM_DEBUG("not reclocking render clock\n");
+ return;
+ }
+
+ /* Restore render clock frequency to original value */
+ if (IS_G4X(dev) || IS_I9XX(dev))
+ pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock);
+ else if (IS_I85X(dev))
+ pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock);
+ DRM_DEBUG("increasing render clock frequency\n");
+
+ /* Schedule downclock */
+ if (schedule)
+ mod_timer(&dev_priv->idle_timer, jiffies +
+ msecs_to_jiffies(GPU_IDLE_TIMEOUT));
+}
+
+void intel_decrease_renderclock(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (IS_IGDNG(dev))
+ return;
+
+ if (!dev_priv->render_reclock_avail) {
+ DRM_DEBUG("not reclocking render clock\n");
+ return;
+ }
+
+ if (IS_G4X(dev)) {
+ u16 gcfgc;
+
+ /* Adjust render clock... */
+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+ /* Down to minimum... */
+ gcfgc &= ~GM45_GC_RENDER_CLOCK_MASK;
+ gcfgc |= GM45_GC_RENDER_CLOCK_266_MHZ;
+
+ pci_write_config_word(dev->pdev, GCFGC, gcfgc);
+ } else if (IS_I965G(dev)) {
+ u16 gcfgc;
+
+ /* Adjust render clock... */
+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+ /* Down to minimum... */
+ gcfgc &= ~I965_GC_RENDER_CLOCK_MASK;
+ gcfgc |= I965_GC_RENDER_CLOCK_267_MHZ;
+
+ pci_write_config_word(dev->pdev, GCFGC, gcfgc);
+ } else if (IS_I945G(dev) || IS_I945GM(dev)) {
+ u16 gcfgc;
+
+ /* Adjust render clock... */
+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+ /* Down to minimum... */
+ gcfgc &= ~I945_GC_RENDER_CLOCK_MASK;
+ gcfgc |= I945_GC_RENDER_CLOCK_166_MHZ;
+
+ pci_write_config_word(dev->pdev, GCFGC, gcfgc);
+ } else if (IS_I915G(dev)) {
+ u16 gcfgc;
+
+ /* Adjust render clock... */
+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+ /* Down to minimum... */
+ gcfgc &= ~I915_GC_RENDER_CLOCK_MASK;
+ gcfgc |= I915_GC_RENDER_CLOCK_166_MHZ;
+
+ pci_write_config_word(dev->pdev, GCFGC, gcfgc);
+ } else if (IS_I85X(dev)) {
+ u16 hpllcc;
+
+ /* Adjust render clock... */
+ pci_read_config_word(dev->pdev, HPLLCC, &hpllcc);
+
+ /* Up to maximum... */
+ hpllcc &= ~GC_CLOCK_CONTROL_MASK;
+ hpllcc |= GC_CLOCK_133_200;
+
+ pci_write_config_word(dev->pdev, HPLLCC, hpllcc);
+ }
+ DRM_DEBUG("decreasing render clock frequency\n");
+}
+
+/* Note that no increase function is needed for this - increase_renderclock()
+ * will also rewrite these bits
+ */
+void intel_decrease_displayclock(struct drm_device *dev)
+{
+ if (IS_IGDNG(dev))
+ return;
+
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) ||
+ IS_I915GM(dev)) {
+ u16 gcfgc;
+
+ /* Adjust render clock... */
+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+ /* Down to minimum... */
+ gcfgc &= ~0xf0;
+ gcfgc |= 0x80;
+
+ pci_write_config_word(dev->pdev, GCFGC, gcfgc);
+ }
+}
+
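/* Editor's sketch (hypothetical helper, not part of the patch): every
 * branch in the reclock functions above repeats the same 16-bit PCI
 * config read-modify-write, which could be factored as below; the name
 * is illustrative and assumes <linux/pci.h>. */
static void intel_reclock_rmw16(struct pci_dev *pdev, int reg,
				u16 clear, u16 set)
{
	u16 tmp;

	/* Read the current register, clear the clock field, set the
	 * requested frequency, and write it back. */
	pci_read_config_word(pdev, reg, &tmp);
	tmp &= ~clear;
	tmp |= set;
	pci_write_config_word(pdev, reg, tmp);
}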
#define CRTC_IDLE_TIMEOUT 1000 /* ms */
static void intel_crtc_idle_timer(unsigned long arg)
mutex_lock(&dev->struct_mutex);
+ /* GPU isn't processing, downclock it. */
+ if (!dev_priv->busy) {
+ intel_decrease_renderclock(dev);
+ intel_decrease_displayclock(dev);
+ }
+
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Skip inactive CRTCs */
if (!crtc->fb)
return;
dev_priv->busy = true;
+ intel_increase_renderclock(dev, true);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->fb)
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D);
- } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
+ } else if (IS_I9XX(dev)) {
bool found = false;
if (I915_READ(SDVOB) & SDVO_DETECTED) {
- DRM_DEBUG_KMS("probing SDVOB\n");
found = intel_sdvo_init(dev, SDVOB);
- if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
- DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
+ if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
intel_hdmi_init(dev, SDVOB);
- }
- if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
- DRM_DEBUG_KMS("probing DP_B\n");
+ if (!found && SUPPORTS_INTEGRATED_DP(dev))
intel_dp_init(dev, DP_B);
- }
}
/* Before G4X SDVOC doesn't have its own detect register */
- if (I915_READ(SDVOB) & SDVO_DETECTED) {
- DRM_DEBUG_KMS("probing SDVOC\n");
+ if (I915_READ(SDVOB) & SDVO_DETECTED)
found = intel_sdvo_init(dev, SDVOC);
- }
if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
- if (SUPPORTS_INTEGRATED_HDMI(dev)) {
- DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
+ if (SUPPORTS_INTEGRATED_HDMI(dev))
intel_hdmi_init(dev, SDVOC);
- }
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- DRM_DEBUG_KMS("probing DP_C\n");
+ if (SUPPORTS_INTEGRATED_DP(dev))
intel_dp_init(dev, DP_C);
- }
}
- if (SUPPORTS_INTEGRATED_DP(dev) &&
- (I915_READ(DP_D) & DP_DETECTED)) {
- DRM_DEBUG_KMS("probing DP_D\n");
+ if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
intel_dp_init(dev, DP_D);
- }
- } else if (IS_I8XX(dev))
+ } else
intel_dvo_init(dev);
- if (SUPPORTS_TV(dev))
+ if (IS_I9XX(dev) && IS_MOBILE(dev) && !IS_IGDNG(dev))
intel_tv_init(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
del_timer_sync(&intel_crtc->idle_timer);
}
+ intel_increase_renderclock(dev, false);
del_timer_sync(&dev_priv->idle_timer);
mutex_unlock(&dev->struct_mutex);
else
intel_output->type = INTEL_OUTPUT_DISPLAYPORT;
- if (output_reg == DP_B || output_reg == PCH_DP_B)
+ if (output_reg == DP_B)
intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
- else if (output_reg == DP_C || output_reg == PCH_DP_C)
+ else if (output_reg == DP_C)
intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
- else if (output_reg == DP_D || output_reg == PCH_DP_D)
+ else if (output_reg == DP_D)
intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
if (IS_eDP(intel_output)) {
break;
case DP_B:
case PCH_DP_B:
- dev_priv->hotplug_supported_mask |=
- HDMIB_HOTPLUG_INT_STATUS;
name = "DPDDC-B";
break;
case DP_C:
case PCH_DP_C:
- dev_priv->hotplug_supported_mask |=
- HDMIC_HOTPLUG_INT_STATUS;
name = "DPDDC-C";
break;
case DP_D:
case PCH_DP_D:
- dev_priv->hotplug_supported_mask |=
- HDMID_HOTPLUG_INT_STATUS;
name = "DPDDC-D";
break;
}
int intel_ddc_get_modes(struct intel_output *intel_output);
extern bool intel_ddc_probe(struct intel_output *intel_output);
void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
-void intel_i2c_reset_gmbus(struct drm_device *dev);
-
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(fbo, 64*1024);
+ ret = i915_gem_object_pin(fbo, PAGE_SIZE);
if (ret) {
DRM_ERROR("failed to pin fb: %d\n", ret);
goto out_unref;
if (sdvox_reg == SDVOB) {
intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
- dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == SDVOC) {
intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
- dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMIB) {
intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
"HDMIB");
- dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMIC) {
intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
"HDMIC");
- dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMID) {
intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
"HDMID");
- dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
}
if (!intel_output->ddc_bus)
goto err_connector;
udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
}
-/* Clears the GMBUS setup. Our driver doesn't make use of the GMBUS I2C
- * engine, but if the BIOS leaves it enabled, then that can break our use
- * of the bit-banging I2C interfaces. This is notably the case with the
- * Mac Mini in EFI mode.
- */
-void
-intel_i2c_reset_gmbus(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (IS_IGDNG(dev)) {
- I915_WRITE(PCH_GMBUS0, 0);
- } else {
- I915_WRITE(GMBUS0, 0);
- }
-}
-
/**
* intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
* @dev: DRM device
if(i2c_bit_add_bus(&chan->adapter))
goto out_free;
- intel_i2c_reset_gmbus(dev);
-
/* JJJ: raise SCL and SDA? */
intel_i2c_quirk_set(dev, true);
set_data(chan, 1);
/* Some lid devices report incorrect lid status, assume they're connected */
static const struct dmi_system_id bad_lid_status[] = {
- {
- .ident = "Compaq nx9020",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_BOARD_NAME, "3084"),
- },
- },
- {
- .ident = "Samsung SX20S",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
- DMI_MATCH(DMI_BOARD_NAME, "SX20S"),
- },
- },
{
.ident = "Aspire One",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"),
},
},
- {
- .ident = "PC-81005",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "MALATA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
- },
- },
{ }
};
struct drm_i915_private *dev_priv =
container_of(nb, struct drm_i915_private, lid_notifier);
struct drm_device *dev = dev_priv->dev;
- struct drm_connector *connector = dev_priv->int_lvds_connector;
- /*
- * check and update the status of LVDS connector after receiving
- * the LID nofication event.
- */
- if (connector)
- connector->status = connector->funcs->detect(connector);
if (!acpi_lid_open()) {
dev_priv->modeset_on_lid = 1;
return NOTIFY_OK;
DRM_DEBUG("lid notifier registration failed\n");
dev_priv->lid_notifier.notifier_call = NULL;
}
- /* keep the LVDS connector */
- dev_priv->int_lvds_connector = connector;
drm_sysfs_connector_add(connector);
return;
}
/**
- * Try to read the response after issuie the DDC switch command. But it
- * is noted that we must do the action of reading response and issuing DDC
- * switch command in one I2C transaction. Otherwise when we try to start
- * another I2C transaction after issuing the DDC bus switch, it will be
- * switched to the internal SDVO register.
+ * Don't check the status code from this, as it switches the bus back to
+ * the SDVO chip, which defeats the purpose of doing a bus switch in the
+ * first place.
*/
static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
u8 target)
{
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
- struct i2c_msg msgs[] = {
- {
- .addr = sdvo_priv->slave_addr >> 1,
- .flags = 0,
- .len = 2,
- .buf = out_buf,
- },
- /* the following two are to read the response */
- {
- .addr = sdvo_priv->slave_addr >> 1,
- .flags = 0,
- .len = 1,
- .buf = cmd_buf,
- },
- {
- .addr = sdvo_priv->slave_addr >> 1,
- .flags = I2C_M_RD,
- .len = 1,
- .buf = ret_value,
- },
- };
-
- intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
- &target, 1);
- /* write the DDC switch command argument */
- intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target);
-
- out_buf[0] = SDVO_I2C_OPCODE;
- out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
- cmd_buf[0] = SDVO_I2C_CMD_STATUS;
- cmd_buf[1] = 0;
- ret_value[0] = 0;
- ret_value[1] = 0;
-
- ret = i2c_transfer(intel_output->i2c_bus, msgs, 3);
- if (ret != 3) {
- /* failure in I2C transfer */
- DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
- return;
- }
- if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("DDC switch command returns response %d\n",
- ret_value[0]);
- return;
- }
- return;
+ intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1);
}
static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1)
edid = drm_get_edid(&intel_output->base,
intel_output->ddc_bus);
- /* This is only applied to SDVO cards with multiple outputs */
- if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) {
- uint8_t saved_ddc, temp_ddc;
- saved_ddc = sdvo_priv->ddc_bus;
- temp_ddc = sdvo_priv->ddc_bus >> 1;
- /*
- * Don't use the 1 as the argument of DDC bus switch to get
- * the EDID. It is used for SDVO SPD ROM.
- */
- while(temp_ddc > 1) {
- sdvo_priv->ddc_bus = temp_ddc;
- edid = drm_get_edid(&intel_output->base,
- intel_output->ddc_bus);
- if (edid) {
- /*
- * When we can get the EDID, maybe it is the
- * correct DDC bus. Update it.
- */
- sdvo_priv->ddc_bus = temp_ddc;
- break;
- }
- temp_ddc >>= 1;
- }
- if (edid == NULL)
- sdvo_priv->ddc_bus = saved_ddc;
- }
/* when there is no edid and no monitor is connected with VGA
* port, try to use the CRT ddc to read the EDID for DVI-connector
*/
bool intel_sdvo_init(struct drm_device *dev, int output_device)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_output *intel_output;
struct intel_sdvo_priv *sdvo_priv;
intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
"SDVOB/VGA DDC BUS");
- dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
} else {
intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
"SDVOC/VGA DDC BUS");
- dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
}
if (intel_output->ddc_bus == NULL)
tv_ctl |= TV_TRILEVEL_SYNC;
if (tv_mode->pal_burst)
tv_ctl |= TV_PAL_BURST;
-
scctl1 = 0;
- if (tv_mode->dda1_inc)
+ /* dda1 implies valid video levels */
+ if (tv_mode->dda1_inc) {
scctl1 |= TV_SC_DDA1_EN;
+ }
+
if (tv_mode->dda2_inc)
scctl1 |= TV_SC_DDA2_EN;
+
if (tv_mode->dda3_inc)
scctl1 |= TV_SC_DDA3_EN;
+
scctl1 |= tv_mode->sc_reset;
- if (video_levels)
- scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
+ scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
drm_connector_attach_property(connector,
dev->mode_config.tv_bottom_margin_property,
tv_priv->margin[TV_MARGIN_BOTTOM]);
-
- dev_priv->hotplug_supported_mask |= TV_HOTPLUG_INT_STATUS;
out:
drm_sysfs_connector_add(connector);
}
uint8_t count = U8((*ptr)++);
SDEBUG(" count: %d\n", count);
if (arg == ATOM_UNIT_MICROSEC)
- udelay(count);
+ schedule_timeout_uninterruptible(usecs_to_jiffies(count));
else
schedule_timeout_uninterruptible(msecs_to_jiffies(count));
}
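/* Editor's note (not part of the patch): udelay() busy-waits with
 * microsecond granularity, while schedule_timeout_uninterruptible()
 * sleeps in jiffies; usecs_to_jiffies() rounds any sub-jiffy count up to
 * one jiffy, so the ATOM_UNIT_MICROSEC case above now sleeps for at
 * least one timer tick however small 'count' is. */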
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
switch (mode) {
case DRM_MODE_DPMS_ON:
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, 1);
atombios_blank_crtc(crtc, 0);
- if (rdev->family < CHIP_R600)
- drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
- radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- if (rdev->family < CHIP_R600)
- drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
atombios_blank_crtc(crtc, 1);
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, 0);
atombios_enable_crtc(crtc, 0);
break;
}
+
+ if (mode != DRM_MODE_DPMS_OFF) {
+ radeon_crtc_load_lut(crtc);
+ }
}
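/* Editor's note (not part of the patch): hoisting radeon_crtc_load_lut()
 * out of the switch means the gamma LUT is reloaded for every DPMS state
 * except full off, rather than only on the DPMS_ON transition. */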
static void
}
}
- /* HIS X1300 is DVI+VGA, not DVI+DVI */
- if ((dev->pdev->device == 0x7146) &&
- (dev->pdev->subsystem_vendor == 0x17af) &&
- (dev->pdev->subsystem_device == 0x2058)) {
- if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
- return false;
- }
-
/* Funky macbooks */
if ((dev->pdev->device == 0x71C5) &&
(dev->pdev->subsystem_vendor == 0x106b) &&
uint32_t mask;
if (radeon_crtc->crtc_id)
- mask = (RADEON_CRTC2_DISP_DIS |
+ mask = (RADEON_CRTC2_EN |
+ RADEON_CRTC2_DISP_DIS |
RADEON_CRTC2_VSYNC_DIS |
RADEON_CRTC2_HSYNC_DIS |
RADEON_CRTC2_DISP_REQ_EN_B);
switch (mode) {
case DRM_MODE_DPMS_ON:
if (radeon_crtc->crtc_id)
- WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask));
+ WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~mask);
else {
WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN |
RADEON_CRTC_DISP_REQ_EN_B));
case DRM_MODE_DPMS_OFF:
drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
if (radeon_crtc->crtc_id)
- WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
+ WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask);
else {
WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN |
RADEON_CRTC_DISP_REQ_EN_B));
/* Number of tests =
* (Total GTT - IB pool - writeback page - ring buffer) / test size
*/
- n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
- rdev->cp.ring_size)) / size;
+ n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
+ rdev->cp.ring_size) / size;
gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
if (!gtt_obj) {
void rs600_gpu_init(struct radeon_device *rdev)
{
+ /* FIXME: HDP same place on rs600 ? */
r100_hdp_reset(rdev);
+ /* FIXME: is this correct ? */
r420_pipes_init(rdev);
/* Wait for mc idle */
if (rs600_mc_wait_for_idle(rdev))
void rs600_vram_info(struct radeon_device *rdev)
{
+	/* FIXME: to do, or are these values sane? */
rdev->mc.vram_is_ddr = true;
rdev->mc.vram_width = 128;
-
- rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
- rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
-
- rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
- rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
-
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
-
- if (rdev->mc.real_vram_size > rdev->mc.aper_size)
- rdev->mc.real_vram_size = rdev->mc.aper_size;
}
void rs600_bandwidth_update(struct radeon_device *rdev)
void rs690_vram_info(struct radeon_device *rdev)
{
+ uint32_t tmp;
fixed20_12 a;
rs400_gart_adjust_size(rdev);
-
+	/* DDR for all cards after R300 & IGP */
rdev->mc.vram_is_ddr = true;
- rdev->mc.vram_width = 128;
-
+ /* FIXME: is this correct for RS690/RS740 ? */
+ tmp = RREG32(RADEON_MEM_CNTL);
+ if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
+ rdev->mc.vram_width = 128;
+ } else {
+ rdev->mc.vram_width = 64;
+ }
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
-
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
-
- if (rdev->mc.real_vram_size > rdev->mc.aper_size)
- rdev->mc.real_vram_size = rdev->mc.aper_size;
-
rs690_pm_info(rdev);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
- .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
- .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
- APPLE_ISO_KEYBOARD },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS),
- .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY),
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
#define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236
#define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237
#define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238
-#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
-#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
-#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
#define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241
usbhid->urbctrl->transfer_dma = usbhid->ctrlbuf_dma;
usbhid->urbctrl->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP);
- if (!(hid->quirks & HID_QUIRK_NO_INIT_REPORTS))
- usbhid_init_reports(hid);
+ usbhid_init_reports(hid);
set_bit(HID_STARTED, &usbhid->iofl);
if (idVendor == USB_VENDOR_ID_NCR &&
idProduct >= USB_DEVICE_ID_NCR_FIRST &&
idProduct <= USB_DEVICE_ID_NCR_LAST)
- return HID_QUIRK_NO_INIT_REPORTS;
+ return HID_QUIRK_NOGET;
down_read(&dquirks_rwsem);
bl_entry = usbhid_exists_dquirk(idVendor, idProduct);
config SENSORS_CORETEMP
tristate "Intel Core/Core2/Atom temperature sensor"
- depends on X86 && PCI && EXPERIMENTAL
+ depends on X86 && EXPERIMENTAL
help
If you say yes here you get support for the temperature
sensor inside your CPU. Most of the family 6 CPUs
#define ADT7462_PIN24_SHIFT 6
#define ADT7462_PIN26_VOLT_INPUT 0x08
#define ADT7462_PIN25_VOLT_INPUT 0x20
-#define ADT7462_PIN28_SHIFT 4 /* cfg3 */
+#define ADT7462_PIN28_SHIFT 6 /* cfg3 */
#define ADT7462_PIN28_VOLT 0x5
#define ADT7462_REG_ALARM1 0xB8
*
* Some, but not all, of these voltages have low/high limits.
*/
-#define ADT7462_VOLT_COUNT 13
+#define ADT7462_VOLT_COUNT 12
#define ADT7462_VENDOR 0x41
#define ADT7462_DEVICE 0x62
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
-#include <linux/pci.h>
#include <asm/msr.h>
#include <asm/processor.h>
int usemsr_ee = 1;
int err;
u32 eax, edx;
- struct pci_dev *host_bridge;
/* Early chips have no MSR for TjMax */
usemsr_ee = 0;
}
- /* Atom CPUs */
+	/* Atoms seem to have TjMax at 90C */
if (c->x86_model == 0x1c) {
usemsr_ee = 0;
-
- host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
-
- if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL
- && (host_bridge->device == 0xa000 /* NM10 based nettop */
- || host_bridge->device == 0xa010)) /* NM10 based netbook */
- tjmax = 100000;
- else
- tjmax = 90000;
-
- pci_dev_put(host_bridge);
+ tjmax = 90000;
}
if ((c->x86_model > 0xe) && (usemsr_ee)) {
static int watchdog_open(struct inode *inode, struct file *filp)
{
struct fschmd_data *pos, *data = NULL;
- int watchdog_is_open;
/* We get called from drivers/char/misc.c with misc_mtx hold, and we
call misc_register() from fschmd_probe() with watchdog_data_mutex
}
}
/* Note we can never not have found data, so we don't check for this */
- watchdog_is_open = test_and_set_bit(0, &data->watchdog_is_open);
- if (!watchdog_is_open)
- kref_get(&data->kref);
+ kref_get(&data->kref);
mutex_unlock(&watchdog_data_mutex);
- if (watchdog_is_open)
+ if (test_and_set_bit(0, &data->watchdog_is_open))
return -EBUSY;
/* Start the watchdog */
static int __init lm78_isa_found(unsigned short address)
{
int val, save, found = 0;
- int port;
-
- /* Some boards declare base+0 to base+7 as a PNP device, some base+4
- * to base+7 and some base+5 to base+6. So we better request each port
- * individually for the probing phase. */
- for (port = address; port < address + LM78_EXTENT; port++) {
- if (!request_region(port, 1, "lm78")) {
- pr_debug("lm78: Failed to request port 0x%x\n", port);
- goto release;
- }
+
+ /* We have to request the region in two parts because some
+ boards declare base+4 to base+7 as a PNP device */
+ if (!request_region(address, 4, "lm78")) {
+ pr_debug("lm78: Failed to request low part of region\n");
+ return 0;
+ }
+ if (!request_region(address + 4, 4, "lm78")) {
+ pr_debug("lm78: Failed to request high part of region\n");
+ release_region(address, 4);
+ return 0;
}
#define REALLY_SLOW_IO
val & 0x80 ? "LM79" : "LM78", (int)address);
release:
- for (port--; port >= address; port--)
- release_region(port, 1);
+ release_region(address + 4, 4);
+ release_region(address, 4);
return found;
}
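/* Editor's note (not part of the patch): request_region() fails if any
 * byte of the requested range is already owned, e.g. by an ACPI/PNP
 * motherboard resource, which is why the probe claims the LM78 I/O
 * window in pieces instead of requesting all of it in one call. */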
int d1 = 0;
int i;
- for (i = 1; i < ARRAY_SIZE(temppoints); i++)
+ for (i = 1; i < ARRAY_SIZE(temppoints) - 1; i++)
/* Find pointer to interpolate */
if (data->supply_uV > temppoints[i - 1].vdd) {
d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd)
const int c1 = -4;
const int c2 = 40500; /* x 10 ^ -6 */
- const int c3 = -2800; /* x10 ^ -9 */
+ const int c3 = 2800; /* x10 ^ -9 */
RHlinear = c1*1000
+ c2 * data->val_humid/1000
+ (data->val_humid * data->val_humid * c3)/1000000;
- return (temp - 25000) * (10000 + 80 * data->val_humid)
+ return (temp - 25000) * (10000 + 800 * data->val_humid)
/ 1000000 + RHlinear;
}
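/* Editor's sketch (illustrative, constants as used above): the two steps
 * implement the SHT1x two-stage conversion
 *
 *   RH_linear = c1 + c2 * SO_rh + c3 * SO_rh^2
 *   RH_true   = (T_C - 25) * (t1 + t2 * SO_rh) + RH_linear
 *
 * with every constant pre-scaled so the driver can stay in integer
 * arithmetic. */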
w83781d_isa_found(unsigned short address)
{
int val, save, found = 0;
- int port;
-
- /* Some boards declare base+0 to base+7 as a PNP device, some base+4
- * to base+7 and some base+5 to base+6. So we better request each port
- * individually for the probing phase. */
- for (port = address; port < address + W83781D_EXTENT; port++) {
- if (!request_region(port, 1, "w83781d")) {
- pr_debug("w83781d: Failed to request port 0x%x\n",
- port);
- goto release;
- }
+
+ /* We have to request the region in two parts because some
+ boards declare base+4 to base+7 as a PNP device */
+ if (!request_region(address, 4, "w83781d")) {
+ pr_debug("w83781d: Failed to request low part of region\n");
+ return 0;
+ }
+ if (!request_region(address + 4, 4, "w83781d")) {
+ pr_debug("w83781d: Failed to request high part of region\n");
+ release_region(address, 4);
+ return 0;
}
#define REALLY_SLOW_IO
val == 0x30 ? "W83782D" : "W83781D", (int)address);
release:
- for (port--; port >= address; port--)
- release_region(port, 1);
+ release_region(address + 4, 4);
+ release_region(address, 4);
return found;
}
unsigned long timeout;
if (irq > -1) {
- ret = wait_event_timeout(pca_wait,
+ ret = wait_event_interruptible_timeout(pca_wait,
pca_isa_readbyte(pd, I2C_PCA_CON)
& I2C_PCA_CON_SI, pca_isa_ops.timeout);
} else {
}
static irqreturn_t pca_handler(int this_irq, void *dev_id) {
- wake_up(&pca_wait);
+ wake_up_interruptible(&pca_wait);
return IRQ_HANDLED;
}
unsigned long timeout;
if (i2c->irq) {
- ret = wait_event_timeout(i2c->wait,
+ ret = wait_event_interruptible_timeout(i2c->wait,
i2c->algo_data.read_byte(i2c, I2C_PCA_CON)
& I2C_PCA_CON_SI, i2c->adap.timeout);
} else {
if ((i2c->algo_data.read_byte(i2c, I2C_PCA_CON) & I2C_PCA_CON_SI) == 0)
return IRQ_NONE;
- wake_up(&i2c->wait);
+ wake_up_interruptible(&i2c->wait);
return IRQ_HANDLED;
}
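/* Editor's note (not part of the patch): wait_event_interruptible_timeout()
 * sleeps in TASK_INTERRUPTIBLE and returns -ERESTARTSYS if a signal
 * arrives, whereas wait_event_timeout() is immune to signals; the same
 * trade-off applies to wake_up() versus wake_up_interruptible(), the
 * latter of which only wakes interruptible sleepers. */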
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/module.h>
-#include <linux/types.h>
/* include interfaces to usb layer */
#include <linux/usb.h>
#define CMD_I2C_IO_END (1<<1)
/* i2c bit delay, default is 10us -> 100kHz */
-static unsigned short delay = 10;
-module_param(delay, ushort, 0);
+static int delay = 10;
+module_param(delay, int, 0);
MODULE_PARM_DESC(delay, "bit delay in microseconds, "
"e.g. 10 for 100kHz (default is 100kHz)");
static u32 usb_func(struct i2c_adapter *adapter)
{
- __le32 func;
+ u32 func;
/* get functionality from adapter */
if (usb_read(adapter, CMD_GET_FUNC, 0, 0, &func, sizeof(func)) !=
return 0;
}
- return le32_to_cpu(func);
+ return func;
}
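/* Editor's note (not part of the patch): the i2c-tiny-usb protocol
 * carries multi-byte values little-endian on the wire, which is what the
 * dropped __le32 annotation and the le32_to_cpu()/cpu_to_le16()
 * conversions accounted for; without them the functionality mask and
 * delay are only interpreted correctly on little-endian hosts. */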
/* This is the actual algorithm we define */
"i2c-tiny-usb at bus %03d device %03d",
dev->usb_dev->bus->busnum, dev->usb_dev->devnum);
- if (usb_write(&dev->adapter, CMD_SET_DELAY, delay, 0, NULL, 0) != 0) {
+ if (usb_write(&dev->adapter, CMD_SET_DELAY,
+ cpu_to_le16(delay), 0, NULL, 0) != 0) {
dev_err(&dev->adapter.dev,
"failure setting delay to %dus\n", delay);
retval = -EIO;
adap->dev.parent);
#endif
- /* device name is gone after device_unregister */
- dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);
-
/* clean up the sysfs representation */
init_completion(&adap->dev_released);
device_unregister(&adap->dev);
idr_remove(&i2c_adapter_idr, adap->nr);
mutex_unlock(&core_lock);
+ dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);
+
/* Clear the device structure in case this adapter is ever going to be
added again */
memset(&adap->dev, 0, sizeof(adap->dev));
if (!(reg48 & u_flag))
pci_write_config_word(dev, 0x48, reg48|u_flag);
- if ((reg4a & a_speed) != u_speed) {
+ /* FIXME: (reg4a & a_speed) ? */
+ if ((reg4a & u_speed) != u_speed) {
pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
pci_read_config_word(dev, 0x4a, ®4a);
pci_write_config_word(dev, 0x4a, reg4a|u_speed);
list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
spin_unlock_irqrestore(&ipath_devs_lock, flags);
ret = create_device_files(sb, dd);
- if (ret)
+ if (ret) {
+ deactivate_locked_super(sb);
goto bail;
+ }
spin_lock_irqsave(&ipath_devs_lock, flags);
}
neigh->neighbour = neighbour;
neigh->dev = dev;
- memset(&neigh->dgid.raw, 0, sizeof (union ib_gid));
*to_ipoib_neigh(neighbour) = neigh;
skb_queue_head_init(&neigh->queue);
ipoib_cm_set(neigh, NULL);
return;
}
- dev_dbg(dev, "IR-RC6 ad 0x%02X cm 0x%02X cu 0x%04X "
+ dev_info(dev, "IR-RC6 ad 0x%02X cm 0x%02X cu 0x%04X "
"toggle %u mode %u scan 0x%08X\n",
address,
command,
* Copyright (c) 2003-2005 Peter Osterlund <petero2@telia.com>
* Copyright (c) 2004 Dmitry Torokhov <dtor@mail.ru>
* Copyright (c) 2005 Vojtech Pavlik <vojtech@suse.cz>
- * Copyright (c) 2009 Sebastian Kapfer <sebastian_kapfer@gmx.net>
*
* ALPS detection, tap switching and status querying info is taken from
* tpconfig utility (by C. Scott Ananian and Bruce Kall).
#define ALPS_OLDPROTO 0x10
#define ALPS_PASS 0x20
#define ALPS_FW_BK_2 0x40
-#define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with
- 6-byte ALPS packet */
static const struct alps_model_info alps_model_data[] = {
{ { 0x32, 0x02, 0x14 }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Toshiba Salellite Pro M10 */
{ { 0x20, 0x02, 0x0e }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* XXX */
{ { 0x22, 0x02, 0x0a }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },
{ { 0x22, 0x02, 0x14 }, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D600 */
- /* Dell Latitude E5500, E6400, E6500, Precision M4400 */
- { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf,
- ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
+ { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude E6500 */
{ { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FW_BK_1 }, /* Dell Vostro 1400 */
};
*/
/*
- * PS/2 packet format
- *
- * byte 0: 0 0 YSGN XSGN 1 M R L
- * byte 1: X7 X6 X5 X4 X3 X2 X1 X0
- * byte 2: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
- *
- * Note that the device never signals overflow condition.
- *
- * ALPS absolute Mode - new format
+ * ALPS absolute Mode - new format
*
* byte 0: 1 ? ? ? 1 ? ? ?
* byte 1: 0 x6 x5 x4 x3 x2 x1 x0
- * byte 2: 0 x10 x9 x8 x7 ? fin ges
+ * byte 2: 0 x10 x9 x8 x7 ? fin ges
* byte 3: 0 y9 y8 y7 1 M R L
* byte 4: 0 y6 y5 y4 y3 y2 y1 y0
* byte 5: 0 z6 z5 z4 z3 z2 z1 z0
*
- * Dualpoint device -- interleaved packet format
- *
- * byte 0: 1 1 0 0 1 1 1 1
- * byte 1: 0 x6 x5 x4 x3 x2 x1 x0
- * byte 2: 0 x10 x9 x8 x7 0 fin ges
- * byte 3: 0 0 YSGN XSGN 1 1 1 1
- * byte 4: X7 X6 X5 X4 X3 X2 X1 X0
- * byte 5: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
- * byte 6: 0 y9 y8 y7 1 m r l
- * byte 7: 0 y6 y5 y4 y3 y2 y1 y0
- * byte 8: 0 z6 z5 z4 z3 z2 z1 z0
- *
- * CAPITALS = stick, miniscules = touchpad
- *
* ?'s can have different meanings on different models,
* such as wheel rotation, extra buttons, stick buttons
* on a dualpoint, etc.
*/
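/* Editor's sketch (hypothetical helper, bit positions taken from the
 * new-format comment above): extracting x/y/z from a 6-byte packet. */
static void alps_decode_new_format(const unsigned char p[6],
				   int *x, int *y, int *z)
{
	*x = (p[1] & 0x7f) | ((p[2] & 0x78) << (7 - 3));	/* x10..x7 */
	*y = (p[4] & 0x7f) | ((p[3] & 0x70) << (7 - 4));	/* y9..y7 */
	*z = p[5] & 0x7f;
}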
-static bool alps_is_valid_first_byte(const struct alps_model_info *model,
- unsigned char data)
-{
- return (data & model->mask0) == model->byte0;
-}
-
-static void alps_report_buttons(struct psmouse *psmouse,
- struct input_dev *dev1, struct input_dev *dev2,
- int left, int right, int middle)
-{
- struct alps_data *priv = psmouse->private;
- const struct alps_model_info *model = priv->i;
-
- if (model->flags & ALPS_PS2_INTERLEAVED) {
- struct input_dev *dev;
-
- /*
- * If shared button has already been reported on the
- * other device (dev2) then this event should be also
- * sent through that device.
- */
- dev = test_bit(BTN_LEFT, dev2->key) ? dev2 : dev1;
- input_report_key(dev, BTN_LEFT, left);
-
- dev = test_bit(BTN_RIGHT, dev2->key) ? dev2 : dev1;
- input_report_key(dev, BTN_RIGHT, right);
-
- dev = test_bit(BTN_MIDDLE, dev2->key) ? dev2 : dev1;
- input_report_key(dev, BTN_MIDDLE, middle);
-
- /*
- * Sync the _other_ device now, we'll do the first
- * device later once we report the rest of the events.
- */
- input_sync(dev2);
- } else {
- /*
- * For devices with non-interleaved packets we know what
- * device buttons belong to so we can simply report them.
- */
- input_report_key(dev1, BTN_LEFT, left);
- input_report_key(dev1, BTN_RIGHT, right);
- input_report_key(dev1, BTN_MIDDLE, middle);
- }
-}
-
static void alps_process_packet(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
int x, y, z, ges, fin, left, right, middle;
int back = 0, forward = 0;
+ if ((packet[0] & 0xc8) == 0x08) { /* 3-byte PS/2 packet */
+ input_report_key(dev2, BTN_LEFT, packet[0] & 1);
+ input_report_key(dev2, BTN_RIGHT, packet[0] & 2);
+ input_report_key(dev2, BTN_MIDDLE, packet[0] & 4);
+ input_report_rel(dev2, REL_X,
+ packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0);
+ input_report_rel(dev2, REL_Y,
+ packet[2] ? ((packet[0] << 3) & 0x100) - packet[2] : 0);
+ input_sync(dev2);
+ return;
+ }
+
if (priv->i->flags & ALPS_OLDPROTO) {
left = packet[2] & 0x10;
right = packet[2] & 0x08;
input_report_rel(dev2, REL_X, (x > 383 ? (x - 768) : x));
input_report_rel(dev2, REL_Y, -(y > 255 ? (y - 512) : y));
- alps_report_buttons(psmouse, dev2, dev, left, right, middle);
+ input_report_key(dev2, BTN_LEFT, left);
+ input_report_key(dev2, BTN_RIGHT, right);
+ input_report_key(dev2, BTN_MIDDLE, middle);
+ input_sync(dev);
input_sync(dev2);
return;
}
- alps_report_buttons(psmouse, dev, dev2, left, right, middle);
+ input_report_key(dev, BTN_LEFT, left);
+ input_report_key(dev, BTN_RIGHT, right);
+ input_report_key(dev, BTN_MIDDLE, middle);
/* Convert hardware tap to a reasonable Z value */
if (ges && !fin) z = 40;
input_sync(dev);
}
-static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
- unsigned char packet[],
- bool report_buttons)
-{
- struct alps_data *priv = psmouse->private;
- struct input_dev *dev2 = priv->dev2;
-
- if (report_buttons)
- alps_report_buttons(psmouse, dev2, psmouse->dev,
- packet[0] & 1, packet[0] & 2, packet[0] & 4);
-
- input_report_rel(dev2, REL_X,
- packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0);
- input_report_rel(dev2, REL_Y,
- packet[2] ? ((packet[0] << 3) & 0x100) - packet[2] : 0);
-
- input_sync(dev2);
-}
-
-static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
-{
- struct alps_data *priv = psmouse->private;
-
- if (psmouse->pktcnt < 6)
- return PSMOUSE_GOOD_DATA;
-
- if (psmouse->pktcnt == 6) {
- /*
- * Start a timer to flush the packet if it ends up last
- * 6-byte packet in the stream. Timer needs to fire
- * psmouse core times out itself. 20 ms should be enough
- * to decide if we are getting more data or not.
- */
- mod_timer(&priv->timer, jiffies + msecs_to_jiffies(20));
- return PSMOUSE_GOOD_DATA;
- }
-
- del_timer(&priv->timer);
-
- if (psmouse->packet[6] & 0x80) {
-
- /*
- * Highest bit is set - that means we either had
- * complete ALPS packet and this is start of the
- * next packet or we got garbage.
- */
-
- if (((psmouse->packet[3] |
- psmouse->packet[4] |
- psmouse->packet[5]) & 0x80) ||
- (!alps_is_valid_first_byte(priv->i, psmouse->packet[6]))) {
- dbg("refusing packet %x %x %x %x "
- "(suspected interleaved ps/2)\n",
- psmouse->packet[3], psmouse->packet[4],
- psmouse->packet[5], psmouse->packet[6]);
- return PSMOUSE_BAD_DATA;
- }
-
- alps_process_packet(psmouse);
-
- /* Continue with the next packet */
- psmouse->packet[0] = psmouse->packet[6];
- psmouse->pktcnt = 1;
-
- } else {
-
- /*
- * High bit is 0 - that means that we indeed got a PS/2
- * packet in the middle of ALPS packet.
- *
- * There is also possibility that we got 6-byte ALPS
- * packet followed by 3-byte packet from trackpoint. We
- * can not distinguish between these 2 scenarios but
- * becase the latter is unlikely to happen in course of
- * normal operation (user would need to press all
- * buttons on the pad and start moving trackpoint
- * without touching the pad surface) we assume former.
- * Even if we are wrong the wost thing that would happen
- * the cursor would jump but we should not get protocol
- * desynchronization.
- */
-
- alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3],
- false);
-
- /*
- * Continue with the standard ALPS protocol handling,
- * but make sure we won't process it as an interleaved
- * packet again, which may happen if all buttons are
- * pressed. To avoid this let's reset the 4th bit which
- * is normally 1.
- */
- psmouse->packet[3] = psmouse->packet[6] & 0xf7;
- psmouse->pktcnt = 4;
- }
-
- return PSMOUSE_GOOD_DATA;
-}
-
-static void alps_flush_packet(unsigned long data)
-{
- struct psmouse *psmouse = (struct psmouse *)data;
-
- serio_pause_rx(psmouse->ps2dev.serio);
-
- if (psmouse->pktcnt == 6) {
-
- /*
- * We did not any more data in reasonable amount of time.
- * Validate the last 3 bytes and process as a standard
- * ALPS packet.
- */
- if ((psmouse->packet[3] |
- psmouse->packet[4] |
- psmouse->packet[5]) & 0x80) {
- dbg("refusing packet %x %x %x "
- "(suspected interleaved ps/2)\n",
- psmouse->packet[3], psmouse->packet[4],
- psmouse->packet[5]);
- } else {
- alps_process_packet(psmouse);
- }
- psmouse->pktcnt = 0;
- }
-
- serio_continue_rx(psmouse->ps2dev.serio);
-}
-
static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
- const struct alps_model_info *model = priv->i;
if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */
if (psmouse->pktcnt == 3) {
- alps_report_bare_ps2_packet(psmouse, psmouse->packet,
- true);
+ alps_process_packet(psmouse);
return PSMOUSE_FULL_PACKET;
}
return PSMOUSE_GOOD_DATA;
}
- /* Check for PS/2 packet stuffed in the middle of ALPS packet. */
-
- if ((model->flags & ALPS_PS2_INTERLEAVED) &&
- psmouse->pktcnt >= 4 && (psmouse->packet[3] & 0x0f) == 0x0f) {
- return alps_handle_interleaved_ps2(psmouse);
- }
-
- if (!alps_is_valid_first_byte(model, psmouse->packet[0])) {
- dbg("refusing packet[0] = %x (mask0 = %x, byte0 = %x)\n",
- psmouse->packet[0], model->mask0, model->byte0);
+ if ((psmouse->packet[0] & priv->i->mask0) != priv->i->byte0)
return PSMOUSE_BAD_DATA;
- }
/* Bytes 2 - 6 should have 0 in the highest bit */
if (psmouse->pktcnt >= 2 && psmouse->pktcnt <= 6 &&
- (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) {
- dbg("refusing packet[%i] = %x\n",
- psmouse->pktcnt - 1, psmouse->packet[psmouse->pktcnt - 1]);
+ (psmouse->packet[psmouse->pktcnt - 1] & 0x80))
return PSMOUSE_BAD_DATA;
- }
if (psmouse->pktcnt == 6) {
alps_process_packet(psmouse);
struct alps_data *priv = psmouse->private;
psmouse_reset(psmouse);
- del_timer_sync(&priv->timer);
input_unregister_device(priv->dev2);
kfree(priv);
}
goto init_fail;
priv->dev2 = dev2;
- setup_timer(&priv->timer, alps_flush_packet, (unsigned long)psmouse);
-
psmouse->private = priv;
if (alps_hw_init(psmouse, &version))
char phys[32]; /* Phys */
const struct alps_model_info *i;/* Info */
int prev_fin; /* Finger bit from previous packet */
- struct timer_list timer;
};
#ifdef CONFIG_MOUSE_PS2_ALPS
max_proto = PSMOUSE_IMEX;
}
+/*
+ * Try Finger Sensing Pad
+ */
+ if (max_proto > PSMOUSE_IMEX) {
+ if (fsp_detect(psmouse, set_properties) == 0) {
+ if (!set_properties || fsp_init(psmouse) == 0)
+ return PSMOUSE_FSP;
+/*
+ * Init failed, try basic relative protocols
+ */
+ max_proto = PSMOUSE_IMEX;
+ }
+ }
if (max_proto > PSMOUSE_IMEX) {
if (genius_detect(psmouse, set_properties) == 0)
return PSMOUSE_TOUCHKIT_PS2;
}
-/*
- * Try Finger Sensing Pad. We do it here because its probe upsets
- * Trackpoint devices (causing TP_READ_ID command to time out).
- */
- if (max_proto > PSMOUSE_IMEX) {
- if (fsp_detect(psmouse, set_properties) == 0) {
- if (!set_properties || fsp_init(psmouse) == 0)
- return PSMOUSE_FSP;
-/*
- * Init failed, try basic relative protocols
- */
- max_proto = PSMOUSE_IMEX;
- }
- }
-
/*
* Reset to defaults in case the device got confused by extended
* protocol probes. Note that we follow up with full reset because
#include <linux/dmi.h>
-static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
+static struct dmi_system_id __initdata i8042_dmi_noloop_table[] = {
{
- /*
- * Arima-Rioworks HDAMB -
- * AUX LOOP command does not raise AUX IRQ
- */
+ /* AUX LOOP command does not raise AUX IRQ */
+ .ident = "Arima-Rioworks HDAMB",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "RIOWORKS"),
DMI_MATCH(DMI_BOARD_NAME, "HDAMB"),
},
},
{
- /* ASUS G1S */
+ .ident = "ASUS G1S",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_BOARD_NAME, "G1S"),
},
},
{
- /* ASUS P65UP5 - AUX LOOP command does not raise AUX IRQ */
+ /* AUX LOOP command does not raise AUX IRQ */
+ .ident = "ASUS P65UP5",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
DMI_MATCH(DMI_BOARD_NAME, "P/I-P65UP5"),
},
},
{
+ .ident = "Compaq Proliant 8500",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"),
},
},
{
+ .ident = "Compaq Proliant DL760",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"),
},
},
{
- /* OQO Model 01 */
+ .ident = "OQO Model 01",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "OQO"),
DMI_MATCH(DMI_PRODUCT_NAME, "ZEPTO"),
},
},
{
- /* ULI EV4873 - AUX LOOP does not work properly */
+ /* AUX LOOP does not work properly */
+ .ident = "ULI EV4873",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ULI"),
DMI_MATCH(DMI_PRODUCT_NAME, "EV4873"),
},
},
{
- /* Microsoft Virtual Machine */
+ .ident = "Microsoft Virtual Machine",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
},
},
{
- /* Medion MAM 2070 */
+ .ident = "Medion MAM 2070",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
DMI_MATCH(DMI_PRODUCT_NAME, "MAM 2070"),
},
},
{
- /* Blue FB5601 */
+ .ident = "Blue FB5601",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "blue"),
DMI_MATCH(DMI_PRODUCT_NAME, "FB5601"),
},
},
{
- /* Gigabyte M912 */
+ .ident = "Gigabyte M912",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
DMI_MATCH(DMI_PRODUCT_NAME, "M912"),
},
},
{
- /* Gigabyte M1022M netbook */
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co.,Ltd."),
- DMI_MATCH(DMI_BOARD_NAME, "M1022E"),
- DMI_MATCH(DMI_BOARD_VERSION, "1.02"),
- },
- },
- {
+ .ident = "HP DV9700",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"),
* ... apparently some Toshibas don't like MUX mode either and
* die horrible death on reboot.
*/
-static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
+static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = {
{
- /* Fujitsu Lifebook P7010/P7010D */
+ .ident = "Fujitsu Lifebook P7010/P7010D",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
DMI_MATCH(DMI_PRODUCT_NAME, "P7010"),
},
},
{
- /* Fujitsu Lifebook P7010 */
+ .ident = "Fujitsu Lifebook P7010",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
DMI_MATCH(DMI_PRODUCT_NAME, "0000000000"),
},
},
{
- /* Fujitsu Lifebook P5020D */
+ .ident = "Fujitsu Lifebook P5020D",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P Series"),
},
},
{
- /* Fujitsu Lifebook S2000 */
+ .ident = "Fujitsu Lifebook S2000",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S Series"),
},
},
{
- /* Fujitsu Lifebook S6230 */
+ .ident = "Fujitsu Lifebook S6230",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S6230"),
},
},
{
- /* Fujitsu T70H */
+ .ident = "Fujitsu T70H",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
DMI_MATCH(DMI_PRODUCT_NAME, "FMVLT70H"),
},
},
{
- /* Fujitsu-Siemens Lifebook T3010 */
+ .ident = "Fujitsu-Siemens Lifebook T3010",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T3010"),
},
},
{
- /* Fujitsu-Siemens Lifebook E4010 */
+ .ident = "Fujitsu-Siemens Lifebook E4010",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E4010"),
},
},
{
- /* Fujitsu-Siemens Amilo Pro 2010 */
+ .ident = "Fujitsu-Siemens Amilo Pro 2010",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro V2010"),
},
},
{
- /* Fujitsu-Siemens Amilo Pro 2030 */
+ .ident = "Fujitsu-Siemens Amilo Pro 2030",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
* No data is coming from the touchscreen unless KBC
* is in legacy mode.
*/
- /* Panasonic CF-29 */
+ .ident = "Panasonic CF-29",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"),
DMI_MATCH(DMI_PRODUCT_NAME, "CF-29"),
},
{
/*
- * HP Pavilion DV4017EA -
- * errors on MUX ports are reported without raising AUXDATA
+ * Errors on MUX ports are reported without raising AUXDATA
* causing "spurious NAK" messages.
*/
+ .ident = "HP Pavilion DV4017EA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EA032EA#ABF)"),
},
{
/*
- * HP Pavilion ZT1000 -
- * like DV4017EA does not raise AUXERR for errors on MUX ports.
+ * Like DV4017EA does not raise AUXERR for errors on MUX ports.
*/
+ .ident = "HP Pavilion ZT1000",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Notebook PC"),
},
{
/*
- * HP Pavilion DV4270ca -
- * like DV4017EA does not raise AUXERR for errors on MUX ports.
+ * Like DV4017EA does not raise AUXERR for errors on MUX ports.
*/
+ .ident = "HP Pavilion DV4270ca",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EH476UA#ABL)"),
},
},
{
+ .ident = "Toshiba P10",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P10"),
},
},
{
+ .ident = "Toshiba Equium A110",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "EQUIUM A110"),
},
},
{
+ .ident = "Alienware Sentia",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"),
DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"),
},
},
{
- /* Sharp Actius MM20 */
+ .ident = "Sharp Actius MM20",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SHARP"),
DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"),
},
},
{
- /* Sony Vaio FS-115b */
+ .ident = "Sony Vaio FS-115b",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"),
},
{
/*
- * Sony Vaio FZ-240E -
- * reset and GET ID commands issued via KBD port are
+ * Reset and GET ID commands issued via KBD port are
* sometimes being delivered to AUX3.
*/
+ .ident = "Sony Vaio FZ-240E",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"),
},
},
{
- /* Amoi M636/A737 */
+ .ident = "Amoi M636/A737",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
DMI_MATCH(DMI_PRODUCT_NAME, "M636/A737 platform"),
},
},
{
- /* Lenovo 3000 n100 */
+ .ident = "Lenovo 3000 n100",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "076804U"),
},
},
{
+ .ident = "Acer Aspire 1360",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
},
},
{
- /* Gericom Bellagio */
+ .ident = "Gericom Bellagio",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
DMI_MATCH(DMI_PRODUCT_NAME, "N34AS6"),
},
},
{
- /* IBM 2656 */
+ .ident = "IBM 2656",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
DMI_MATCH(DMI_PRODUCT_NAME, "2656"),
},
},
{
- /* Dell XPS M1530 */
+ .ident = "Dell XPS M1530",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "XPS M1530"),
},
},
{
- /* Compal HEL80I */
+ .ident = "Compal HEL80I",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "COMPAL"),
DMI_MATCH(DMI_PRODUCT_NAME, "HEL80I"),
},
},
{
- /* Dell Vostro 1510 */
+ .ident = "Dell Vostro 1510",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"),
},
},
{
- /* Acer Aspire 5536 */
+ .ident = "Acer Aspire 5536",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"),
{ }
};
-static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
+static struct dmi_system_id __initdata i8042_dmi_reset_table[] = {
{
- /* MSI Wind U-100 */
+ .ident = "MSI Wind U-100",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "U-100"),
DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
},
},
{
- /* LG Electronics X110 */
+ .ident = "LG Electronics X110",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "X110"),
DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."),
},
},
{
- /* Acer Aspire One 150 */
+ .ident = "Acer Aspire One 150",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"),
},
},
{
- /* Advent 4211 */
+ .ident = "Advent 4211",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"),
DMI_MATCH(DMI_PRODUCT_NAME, "Advent 4211"),
},
},
{
- /* Medion Akoya Mini E1210 */
+ .ident = "Medion Akoya Mini E1210",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
DMI_MATCH(DMI_PRODUCT_NAME, "E1210"),
},
},
{
- /* Mivvy M310 */
+ .ident = "Mivvy M310",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"),
DMI_MATCH(DMI_PRODUCT_NAME, "N10"),
},
},
{
- /* Dell Vostro 1320 */
+ .ident = "Dell Vostro 1320",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1320"),
},
},
{
- /* Dell Vostro 1520 */
+ .ident = "Dell Vostro 1520",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1520"),
},
},
{
- /* Dell Vostro 1720 */
+ .ident = "Dell Vostro 1720",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"),
};
#ifdef CONFIG_PNP
-static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = {
+static struct dmi_system_id __initdata i8042_dmi_nopnp_table[] = {
{
- /* Intel MBO Desktop D845PESV */
+ .ident = "Intel MBO Desktop D845PESV",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "D845PESV"),
DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
},
},
{
- /* MSI Wind U-100 */
+ .ident = "MSI Wind U-100",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "U-100"),
DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
{ }
};
-static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = {
+static struct dmi_system_id __initdata i8042_dmi_laptop_table[] = {
{
+ .ident = "Portable",
.matches = {
DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */
},
},
{
+ .ident = "Laptop",
.matches = {
DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */
},
},
{
+ .ident = "Notebook",
.matches = {
DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
},
},
{
+ .ident = "Sub-Notebook",
.matches = {
DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */
},
* Originally, this was just confined to older laptops, but a few Acer laptops
* have turned up in 2007 that also need this again.
*/
-static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = {
- {
- /* Acer Aspire 5610 */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"),
- },
- },
+static struct dmi_system_id __initdata i8042_dmi_dritek_table[] = {
{
- /* Acer Aspire 5630 */
+ .ident = "Acer Aspire 5630",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"),
},
},
{
- /* Acer Aspire 5650 */
+ .ident = "Acer Aspire 5650",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"),
},
},
{
- /* Acer Aspire 5680 */
+ .ident = "Acer Aspire 5680",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"),
},
},
{
- /* Acer Aspire 5720 */
+ .ident = "Acer Aspire 5720",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5720"),
},
},
{
- /* Acer Aspire 9110 */
+ .ident = "Acer Aspire 9110",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"),
},
},
{
- /* Acer TravelMate 660 */
+ .ident = "Acer TravelMate 660",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 660"),
},
},
{
- /* Acer TravelMate 2490 */
+ .ident = "Acer TravelMate 2490",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"),
},
},
{
- /* Acer TravelMate 4280 */
+ .ident = "Acer TravelMate 4280",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"),
* We assume the Guest has the same number of GDT entries as the
* Host, otherwise we'd have to dynamically allocate the Guest GDT.
*/
- if (num >= ARRAY_SIZE(cpu->arch.gdt)) {
+ if (num >= ARRAY_SIZE(cpu->arch.gdt))
kill_guest(cpu, "too many gdt entries %i", num);
- return;
- }
/* Set it up, then fix it. */
cpu->arch.gdt[num].a = lo;
u8 limits[3];
int last_speed[2];
int last_var[2];
- int pwm_inv[2];
};
static enum {ADT7460, ADT7467} therm_type;
if (speed >= 0) {
manual = read_reg(th, MANUAL_MODE[fan]);
- manual &= ~INVERT_MASK;
write_reg(th, MANUAL_MODE[fan],
- manual | MANUAL_MASK | th->pwm_inv[fan]);
+ (manual|MANUAL_MASK) & (~INVERT_MASK));
write_reg(th, FAN_SPD_SET[fan], speed);
} else {
/* back to automatic */
if(therm_type == ADT7460) {
manual = read_reg(th,
MANUAL_MODE[fan]) & (~MANUAL_MASK);
- manual &= ~INVERT_MASK;
- manual |= th->pwm_inv[fan];
+
write_reg(th,
MANUAL_MODE[fan], manual|REM_CONTROL[fan]);
} else {
manual = read_reg(th, MANUAL_MODE[fan]);
- manual &= ~INVERT_MASK;
- manual |= th->pwm_inv[fan];
write_reg(th, MANUAL_MODE[fan], manual&(~AUTO_MASK));
}
}
thermostat = th;
- /* record invert bit status because fw can corrupt it after suspend */
- th->pwm_inv[0] = read_reg(th, MANUAL_MODE[0]) & INVERT_MASK;
- th->pwm_inv[1] = read_reg(th, MANUAL_MODE[1]) & INVERT_MASK;
-
/* be sure to really write fan speed the first time */
th->last_speed[0] = -2;
th->last_speed[1] = -2;
fct->ctrl.name = "cpu-front-fan-1";
else if (!strcmp(l, "CPU A PUMP"))
fct->ctrl.name = "cpu-pump-0";
- else if (!strcmp(l, "CPU B PUMP"))
- fct->ctrl.name = "cpu-pump-1";
else if (!strcmp(l, "Slots Fan") || !strcmp(l, "Slots fan") ||
!strcmp(l, "EXPANSION SLOTS INTAKE"))
fct->ctrl.name = "slots-fan";
* out to disk
*/
-void bitmap_daemon_work(mddev_t *mddev)
+void bitmap_daemon_work(struct bitmap *bitmap)
{
- struct bitmap *bitmap;
unsigned long j;
unsigned long flags;
struct page *page = NULL, *lastpage = NULL;
int blocks;
void *paddr;
- /* Use a mutex to guard daemon_work against
- * bitmap_destroy.
- */
- mutex_lock(&mddev->bitmap_mutex);
- bitmap = mddev->bitmap;
- if (bitmap == NULL) {
- mutex_unlock(&mddev->bitmap_mutex);
+ if (bitmap == NULL)
return;
- }
if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ))
goto done;
bitmap->daemon_lastrun = jiffies;
if (bitmap->allclean) {
bitmap->mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
- goto done;
+ return;
}
bitmap->allclean = 1;
done:
if (bitmap->allclean == 0)
bitmap->mddev->thread->timeout = bitmap->daemon_sleep * HZ;
- mutex_unlock(&mddev->bitmap_mutex);
}
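
The bitmap_mutex lines this hunk removes were the guard serializing the periodic daemon work against bitmap_destroy(): the destroyer unpublishes the pointer under the lock, and the worker re-checks it under the same lock before touching anything. A userspace sketch of that publish/unpublish pattern, using pthreads and invented names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the md bitmap; not the kernel structure. */
struct bitmap { int dirty_pages; };

static pthread_mutex_t bitmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct bitmap *the_bitmap;

/* Periodic worker: takes the guard, re-checks the pointer, then works. */
static void daemon_work(void)
{
	pthread_mutex_lock(&bitmap_mutex);
	if (!the_bitmap) {                 /* destroyed while we slept */
		pthread_mutex_unlock(&bitmap_mutex);
		return;
	}
	the_bitmap->dirty_pages = 0;       /* safe: destroy is excluded */
	pthread_mutex_unlock(&bitmap_mutex);
}

/* Teardown: unpublish the pointer under the same guard, then free. */
static void bitmap_destroy(void)
{
	struct bitmap *b;

	pthread_mutex_lock(&bitmap_mutex);
	b = the_bitmap;
	the_bitmap = NULL;
	pthread_mutex_unlock(&bitmap_mutex);
	free(b);                           /* no worker can still see it */
}

int main(void)
{
	the_bitmap = calloc(1, sizeof(*the_bitmap));
	daemon_work();
	bitmap_destroy();
	daemon_work();                     /* now a harmless no-op */
	puts("ok");
	return 0;
}
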
static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
*/
sleep = bitmap->daemon_sleep;
bitmap->daemon_sleep = 0;
- bitmap_daemon_work(mddev);
- bitmap_daemon_work(mddev);
- bitmap_daemon_work(mddev);
+ bitmap_daemon_work(bitmap);
+ bitmap_daemon_work(bitmap);
+ bitmap_daemon_work(bitmap);
bitmap->daemon_sleep = sleep;
bitmap_update_sb(bitmap);
}
kfree(bp);
kfree(bitmap);
}
-
void bitmap_destroy(mddev_t *mddev)
{
struct bitmap *bitmap = mddev->bitmap;
if (!bitmap) /* there was no bitmap */
return;
- mutex_lock(&mddev->bitmap_mutex);
mddev->bitmap = NULL; /* disconnect from the md device */
- mutex_unlock(&mddev->bitmap_mutex);
if (mddev->thread)
mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector);
void bitmap_unplug(struct bitmap *bitmap);
-void bitmap_daemon_work(mddev_t *mddev);
+void bitmap_daemon_work(struct bitmap *bitmap);
#endif
#endif
/*
* Copyright (C) 2003 Christophe Saout <christophe@saout.de>
* Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
- * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
const char *opts);
void (*dtr)(struct crypt_config *cc);
- int (*init)(struct crypt_config *cc);
- int (*wipe)(struct crypt_config *cc);
+ const char *(*status)(struct crypt_config *cc);
int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};
-struct iv_essiv_private {
- struct crypto_cipher *tfm;
- struct crypto_hash *hash_tfm;
- u8 *salt;
-};
-
-struct iv_benbi_private {
- int shift;
-};
-
/*
* Crypt: maps a linear range of a block device
* and encrypts / decrypts at the same time.
struct crypt_iv_operations *iv_gen_ops;
char *iv_mode;
union {
- struct iv_essiv_private essiv;
- struct iv_benbi_private benbi;
+ struct crypto_cipher *essiv_tfm;
+ int benbi_shift;
} iv_gen_private;
sector_t iv_offset;
unsigned int iv_size;
return 0;
}
-/* Initialise ESSIV - compute salt but no local memory allocations */
-static int crypt_iv_essiv_init(struct crypt_config *cc)
-{
- struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
- struct hash_desc desc;
- struct scatterlist sg;
- int err;
-
- sg_init_one(&sg, cc->key, cc->key_size);
- desc.tfm = essiv->hash_tfm;
- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-
- err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
- if (err)
- return err;
-
- return crypto_cipher_setkey(essiv->tfm, essiv->salt,
- crypto_hash_digestsize(essiv->hash_tfm));
-}
-
-/* Wipe salt and reset key derived from volume key */
-static int crypt_iv_essiv_wipe(struct crypt_config *cc)
-{
- struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
- unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
-
- memset(essiv->salt, 0, salt_size);
-
- return crypto_cipher_setkey(essiv->tfm, essiv->salt, salt_size);
-}
-
-static void crypt_iv_essiv_dtr(struct crypt_config *cc)
-{
- struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
-
- crypto_free_cipher(essiv->tfm);
- essiv->tfm = NULL;
-
- crypto_free_hash(essiv->hash_tfm);
- essiv->hash_tfm = NULL;
-
- kzfree(essiv->salt);
- essiv->salt = NULL;
-}
-
static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
const char *opts)
{
- struct crypto_cipher *essiv_tfm = NULL;
- struct crypto_hash *hash_tfm = NULL;
- u8 *salt = NULL;
+ struct crypto_cipher *essiv_tfm;
+ struct crypto_hash *hash_tfm;
+ struct hash_desc desc;
+ struct scatterlist sg;
+ unsigned int saltsize;
+ u8 *salt;
int err;
- if (!opts) {
+ if (opts == NULL) {
ti->error = "Digest algorithm missing for ESSIV mode";
return -EINVAL;
}
- /* Allocate hash algorithm */
+ /* Hash the cipher key with the given hash algorithm */
hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(hash_tfm)) {
ti->error = "Error initializing ESSIV hash";
- err = PTR_ERR(hash_tfm);
- goto bad;
+ return PTR_ERR(hash_tfm);
}
- salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
- if (!salt) {
+ saltsize = crypto_hash_digestsize(hash_tfm);
+ salt = kmalloc(saltsize, GFP_KERNEL);
+ if (salt == NULL) {
ti->error = "Error kmallocing salt storage in ESSIV";
- err = -ENOMEM;
- goto bad;
+ crypto_free_hash(hash_tfm);
+ return -ENOMEM;
}
- /* Allocate essiv_tfm */
+ sg_init_one(&sg, cc->key, cc->key_size);
+ desc.tfm = hash_tfm;
+ desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
+ crypto_free_hash(hash_tfm);
+
+ if (err) {
+ ti->error = "Error calculating hash in ESSIV";
+ kfree(salt);
+ return err;
+ }
+
+	/* Set up the essiv_tfm with the given salt */
essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(essiv_tfm)) {
ti->error = "Error allocating crypto tfm for ESSIV";
- err = PTR_ERR(essiv_tfm);
- goto bad;
+ kfree(salt);
+ return PTR_ERR(essiv_tfm);
}
if (crypto_cipher_blocksize(essiv_tfm) !=
crypto_ablkcipher_ivsize(cc->tfm)) {
ti->error = "Block size of ESSIV cipher does "
"not match IV size of block cipher";
- err = -EINVAL;
- goto bad;
+ crypto_free_cipher(essiv_tfm);
+ kfree(salt);
+ return -EINVAL;
}
+ err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
+ if (err) {
+ ti->error = "Failed to set key for ESSIV cipher";
+ crypto_free_cipher(essiv_tfm);
+ kfree(salt);
+ return err;
+ }
+ kfree(salt);
- cc->iv_gen_private.essiv.salt = salt;
- cc->iv_gen_private.essiv.tfm = essiv_tfm;
- cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
-
+ cc->iv_gen_private.essiv_tfm = essiv_tfm;
return 0;
+}
-bad:
- if (essiv_tfm && !IS_ERR(essiv_tfm))
- crypto_free_cipher(essiv_tfm);
- if (hash_tfm && !IS_ERR(hash_tfm))
- crypto_free_hash(hash_tfm);
- kfree(salt);
- return err;
+static void crypt_iv_essiv_dtr(struct crypt_config *cc)
+{
+ crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
+ cc->iv_gen_private.essiv_tfm = NULL;
}
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
memset(iv, 0, cc->iv_size);
*(u64 *)iv = cpu_to_le64(sector);
- crypto_cipher_encrypt_one(cc->iv_gen_private.essiv.tfm, iv, iv);
+ crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
return 0;
}
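
Whichever way the constructor is organised, the ESSIV computation itself stays the same: salt = H(volume key), then IV(sector) = E_salt(sector encoded as a little-endian 64-bit value). A toy sketch of that data flow; the hash and "cipher" below are trivial stand-ins to keep the example self-contained, where the real code uses the kernel crypto API:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define IV_SIZE 16

/* Toy stand-ins: the kernel uses a real digest and a real block cipher
 * here; these exist only to show the ESSIV data flow. */
static void toy_hash(const uint8_t *in, size_t len, uint8_t out[IV_SIZE])
{
	size_t i;

	memset(out, 0x5a, IV_SIZE);
	for (i = 0; i < len; i++)
		out[i % IV_SIZE] ^= in[i] + (uint8_t)i;
}

static void toy_encrypt(const uint8_t key[IV_SIZE], uint8_t block[IV_SIZE])
{
	int i;

	for (i = 0; i < IV_SIZE; i++)
		block[i] ^= key[i];        /* stand-in for a block cipher */
}

/* ESSIV: salt = H(volume_key); IV(sector) = E_salt(little-endian sector) */
static void essiv_iv(const uint8_t *key, size_t keylen,
		     uint64_t sector, uint8_t iv[IV_SIZE])
{
	uint8_t salt[IV_SIZE];
	int i;

	toy_hash(key, keylen, salt);
	memset(iv, 0, IV_SIZE);
	for (i = 0; i < 8; i++)            /* cpu_to_le64(sector) */
		iv[i] = (uint8_t)(sector >> (8 * i));
	toy_encrypt(salt, iv);
}

int main(void)
{
	uint8_t iv[IV_SIZE];

	essiv_iv((const uint8_t *)"volume-key", 10, 42, iv);
	printf("iv[0]=%02x iv[1]=%02x\n", iv[0], iv[1]);
	return 0;
}
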
return -EINVAL;
}
- cc->iv_gen_private.benbi.shift = 9 - log;
+ cc->iv_gen_private.benbi_shift = 9 - log;
return 0;
}
memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
- val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi.shift) + 1);
+ val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
return 0;
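
The benbi generator above needs no crypto at all: the IV is the 1-based index of the cipher's narrow block, stored big-endian in the last 64 bits of the IV, and shift = 9 - log2(block size) converts a 512-byte sector number into that index. A self-contained sketch (htobe64 is the glibc spelling of cpu_to_be64):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IV_SIZE 16

/* benbi: IV = big-endian 64-bit (narrow-block index + 1), right-aligned. */
static void benbi_iv(uint64_t sector, int shift, uint8_t iv[IV_SIZE])
{
	uint64_t val = htobe64((sector << shift) + 1);

	memset(iv, 0, IV_SIZE - sizeof(val));
	memcpy(iv + IV_SIZE - sizeof(val), &val, sizeof(val));
}

int main(void)
{
	uint8_t iv[IV_SIZE];

	/* 64-byte-wide blocks: shift = 9 - log2(64) = 3 */
	benbi_iv(5, 3, iv);
	printf("last byte = %u\n", iv[IV_SIZE - 1]);   /* (5<<3)+1 = 41 */
	return 0;
}
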
static struct crypt_iv_operations crypt_iv_essiv_ops = {
.ctr = crypt_iv_essiv_ctr,
.dtr = crypt_iv_essiv_dtr,
- .init = crypt_iv_essiv_init,
- .wipe = crypt_iv_essiv_wipe,
.generator = crypt_iv_essiv_gen
};
cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
goto bad_ivmode;
- if (cc->iv_gen_ops && cc->iv_gen_ops->init &&
- cc->iv_gen_ops->init(cc) < 0) {
- ti->error = "Error initialising IV";
- goto bad_slab_pool;
- }
-
cc->iv_size = crypto_ablkcipher_ivsize(tfm);
if (cc->iv_size)
/* at least a 64 bit sector number should fit in our buffer */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
struct crypt_config *cc = ti->private;
- int ret = -EINVAL;
if (argc < 2)
goto error;
DMWARN("not suspended during key manipulation.");
return -EINVAL;
}
- if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) {
- ret = crypt_set_key(cc, argv[2]);
- if (ret)
- return ret;
- if (cc->iv_gen_ops && cc->iv_gen_ops->init)
- ret = cc->iv_gen_ops->init(cc);
- return ret;
- }
- if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) {
- if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
- ret = cc->iv_gen_ops->wipe(cc);
- if (ret)
- return ret;
- }
+ if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
+ return crypt_set_key(cc, argv[2]);
+ if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
return crypt_wipe_key(cc);
- }
}
error:
type = get_type("N");
else {
ti->error = "Persistent flag is not P or N";
- r = -EINVAL;
- goto bad_type;
+ return -EINVAL;
}
if (!type) {
*/
static DECLARE_RWSEM(_hash_lock);
-/*
- * Protects use of mdptr to obtain hash cell name and uuid from mapped device.
- */
-static DEFINE_MUTEX(dm_hash_cells_mutex);
-
static void init_buckets(struct list_head *buckets)
{
unsigned int i;
list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid));
}
dm_get(md);
- mutex_lock(&dm_hash_cells_mutex);
dm_set_mdptr(md, cell);
- mutex_unlock(&dm_hash_cells_mutex);
up_write(&_hash_lock);
return 0;
/* remove from the dev hash */
list_del(&hc->uuid_list);
list_del(&hc->name_list);
- mutex_lock(&dm_hash_cells_mutex);
dm_set_mdptr(hc->md, NULL);
- mutex_unlock(&dm_hash_cells_mutex);
table = dm_get_table(hc->md);
if (table) {
*/
list_del(&hc->name_list);
old_name = hc->name;
- mutex_lock(&dm_hash_cells_mutex);
hc->name = new_name;
- mutex_unlock(&dm_hash_cells_mutex);
list_add(&hc->name_list, _name_buckets + hash_str(new_name));
/*
if (!md)
return -ENXIO;
- mutex_lock(&dm_hash_cells_mutex);
+ dm_get(md);
+ down_read(&_hash_lock);
hc = dm_get_mdptr(md);
if (!hc || hc->md != md) {
r = -ENXIO;
strcpy(uuid, hc->uuid ? : "");
out:
- mutex_unlock(&dm_hash_cells_mutex);
+ up_read(&_hash_lock);
+ dm_put(md);
return r;
}
{
int r = 0;
size_t dummy = 0;
- int overhead_size = sizeof(struct dm_ulog_request) + sizeof(struct cn_msg);
+ int overhead_size =
+ sizeof(struct dm_ulog_request *) + sizeof(struct cn_msg);
struct dm_ulog_request *tfr = prealloced_ulog_tfr;
struct receiving_pkg pkg;
- /*
- * Given the space needed to hold the 'struct cn_msg' and
- * 'struct dm_ulog_request' - do we have enough payload
- * space remaining?
- */
if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) {
DMINFO("Size of tfr exceeds preallocated size");
return -EINVAL;
*/
mutex_lock(&dm_ulog_lock);
- memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg));
+ memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size);
memcpy(tfr->uuid, uuid, DM_UUID_LEN);
tfr->luid = luid;
tfr->seq = dm_ulog_seq++;
hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
hash_size = min(hash_size, max_buckets);
- if (hash_size < 64)
- hash_size = 64;
hash_size = rounddown_pow_of_two(hash_size);
if (init_exception_table(&s->complete, hash_size,
DM_CHUNK_CONSECUTIVE_BITS))
unsigned sz = 0;
struct dm_snapshot *snap = ti->private;
+ down_write(&snap->lock);
+
switch (type) {
case STATUSTYPE_INFO:
-
- down_write(&snap->lock);
-
if (!snap->valid)
DMEMIT("Invalid");
else {
else
DMEMIT("Unknown");
}
-
- up_write(&snap->lock);
-
break;
case STATUSTYPE_TABLE:
break;
}
+ up_write(&snap->lock);
+
return 0;
}
}
stripes = simple_strtoul(argv[0], &end, 10);
- if (!stripes || *end) {
+ if (*end) {
ti->error = "Invalid stripe count";
return -EINVAL;
}
return 0;
}
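
Of the two forms in the stripe-count hunk above, "!stripes || *end" rejects both trailing junk and an explicit zero, while "*end" alone lets "0" through. Both checks, in a standalone strtoul-based sketch:

#include <stdio.h>
#include <stdlib.h>

/* Parse a stripe count: must be all digits and non-zero. */
static int parse_stripes(const char *arg, unsigned long *out)
{
	char *end;
	unsigned long v = strtoul(arg, &end, 10);

	if (end == arg || *end != '\0')    /* no digits, or trailing junk */
		return -1;
	if (v == 0)                        /* zero stripes is meaningless */
		return -1;
	*out = v;
	return 0;
}

int main(void)
{
	unsigned long n;
	const char *tests[] = { "4", "0", "4x", "" };

	for (int i = 0; i < 4; i++)
		printf("%-4s -> %s\n", tests[i],
		       parse_stripes(tests[i], &n) ? "rejected" : "ok");
	return 0;
}
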
- if (bdev_stack_limits(limits, bdev, start) < 0)
- DMWARN("%s: adding target device %s caused an alignment inconsistency: "
+ if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
+ DMWARN("%s: target device %s is misaligned: "
"physical_block_size=%u, logical_block_size=%u, "
"alignment_offset=%u, start=%llu",
dm_device_name(ti->table->md), bdevname(bdev, b),
q->limits.physical_block_size,
q->limits.logical_block_size,
q->limits.alignment_offset,
- (unsigned long long) start << SECTOR_SHIFT);
+ (unsigned long long) start << 9);
+
/*
* Check if merge fn is supported.
* for the table.
*/
if (blk_stack_limits(limits, &ti_limits, 0) < 0)
- DMWARN("%s: adding target device "
+ DMWARN("%s: target device "
"(start sect %llu len %llu) "
- "caused an alignment inconsistency",
+ "is misaligned",
dm_device_name(table->md),
(unsigned long long) ti->begin,
(unsigned long long) ti->len);
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
+ /*
+ * Each target device in the table has a data area that should normally
+ * be aligned such that the DM device's alignment_offset is 0.
+ * FIXME: Propagate alignment_offsets up the stack and warn of
+ * sub-optimal or inconsistent settings.
+ */
+ limits->alignment_offset = 0;
+ limits->misaligned = 0;
+
/*
* Copy table's limits to the DM device's request_queue
*/
list_del_init(&event->elist);
/*
- * When a device is being removed this copy fails and we
- * discard these unsent events.
+ * Need to call dm_copy_name_and_uuid from here for now.
+ * The context of the earlier variable additions and the
+ * locking used for hash_cell are not compatible.
*/
if (dm_copy_name_and_uuid(event->md, event->name,
event->uuid)) {
- DMINFO("%s: skipping sending uevent for lost device",
- __func__);
+ DMERR("%s: dm_copy_name_and_uuid() failed",
+ __func__);
goto uevent_free;
}
if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
return;
if (!mddev->raid_disks && list_empty(&mddev->disks) &&
- mddev->ctime == 0 && !mddev->hold_active) {
- /* Array is not configured at all, and not held active,
- * so destroy it */
+ !mddev->hold_active) {
list_del(&mddev->all_mddevs);
if (mddev->gendisk) {
/* we did a probe so need to clean up.
mutex_init(&new->open_mutex);
mutex_init(&new->reconfig_mutex);
- mutex_init(&new->bitmap_mutex);
INIT_LIST_HEAD(&new->disks);
INIT_LIST_HEAD(&new->all_mddevs);
init_timer(&new->safemode_timer);
mddev->barriers_work = 1;
mddev->ok_start_degraded = start_dirty_degraded;
- if (start_readonly && mddev->ro == 0)
+ if (start_readonly)
mddev->ro = 2; /* read-only, but switch on first write */
err = mddev->pers->run(mddev);
mddev->minor_version = info->minor_version;
mddev->patch_version = info->patch_version;
mddev->persistent = !info->not_persistent;
- /* ensure mddev_put doesn't delete this now that there
- * is some minimal configuration.
- */
- mddev->ctime = get_seconds();
return 0;
}
mddev->major_version = MD_MAJOR_VERSION;
if (mddev->bitmap)
- bitmap_daemon_work(mddev);
+ bitmap_daemon_work(mddev->bitmap);
if (mddev->ro)
return;
* hot-adding a bitmap. It should
* eventually be settable by sysfs.
*/
- struct mutex bitmap_mutex;
struct list_head all_mddevs;
};
!test_bit(Faulty, &rdev->flags)) {
if (raid5_add_disk(mddev, rdev) == 0) {
char nm[20];
- if (rdev->raid_disk >= conf->previous_raid_disks) {
+ if (rdev->raid_disk >= conf->previous_raid_disks)
set_bit(In_sync, &rdev->flags);
- added_devices++;
- } else
+ else
rdev->recovery_offset = 0;
+ added_devices++;
sprintf(nm, "rd%d", rdev->raid_disk);
if (sysfs_create_link(&mddev->kobj,
&rdev->kobj, nm))
break;
}
- /* When a reshape changes the number of devices, ->degraded
- * is measured against the large of the pre and post number of
- * devices.*/
if (mddev->delta_disks > 0) {
spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
+ mddev->degraded = (conf->raid_disks - conf->previous_raid_disks)
- added_devices;
spin_unlock_irqrestore(&conf->device_lock, flags);
}
i = j = 0;
while (reg_pair1[i].reg || reg_pair1[i].val) {
- while (reg_pair2[j].reg || reg_pair2[j].val) {
+ while (reg_pair2[j].reg || reg_pair2[j].reg) {
if (reg_pair1[i].reg != reg_pair2[j].reg) {
j++;
continue;
dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
dmxdevfilter->type = DMXDEV_TYPE_NONE;
dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
+ INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
init_timer(&dmxdevfilter->timer);
dvbdev->users++;
dmxdevfilter->type = DMXDEV_TYPE_PES;
memcpy(&dmxdevfilter->params, params,
sizeof(struct dmx_pes_filter_params));
- INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
{ USB_DEVICE(0x2040, 0xb910),
.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
- { USB_DEVICE(0x2040, 0xb980),
- .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
- { USB_DEVICE(0x2040, 0xb990),
- .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
{ USB_DEVICE(0x2040, 0xc000),
.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
- { USB_DEVICE(0x2040, 0xc010),
- .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
- { USB_DEVICE(0x2040, 0xc080),
- .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
- { USB_DEVICE(0x2040, 0xc090),
- .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
{ } /* Terminating entry */
};
{USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x041e, 0x4064),
.driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
- {USB_DEVICE(0x041e, 0x4067), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x041e, 0x4068),
.driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
{USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 },
}
}
if (avg_lum > MAX_AVG_LUM) {
- if (sd->gain >= 1) {
+ if (sd->gain - 1 >= 0) {
sd->gain--;
set_gain(gspca_dev);
}
rc = spca504B_PollingDataReady(gspca_dev);
/* Init the cam width height with some values get on init ? */
- reg_w_riv(dev, 0x31, 0x04, 0);
+ reg_w_riv(dev, 0x31, 0, 0x04);
spca504B_WaitCmdStatus(gspca_dev);
rc = spca504B_PollingDataReady(gspca_dev);
break;
default:
/* case BRIDGE_SPCA533: */
/* case BRIDGE_SPCA504B: */
- reg_w_riv(dev, 0, 0x21ad, 0x00); /* hue */
- reg_w_riv(dev, 0, 0x21ac, 0x01); /* sat/hue */
- reg_w_riv(dev, 0, 0x21a3, 0x00); /* gamma */
+ reg_w_riv(dev, 0, 0x00, 0x21ad); /* hue */
+ reg_w_riv(dev, 0, 0x01, 0x21ac); /* sat/hue */
+ reg_w_riv(dev, 0, 0x00, 0x21a3); /* gamma */
break;
case BRIDGE_SPCA536:
- reg_w_riv(dev, 0, 0x20f5, 0x40);
- reg_w_riv(dev, 0, 0x20f4, 0x01);
- reg_w_riv(dev, 0, 0x2089, 0x00);
+ reg_w_riv(dev, 0, 0x40, 0x20f5);
+ reg_w_riv(dev, 0, 0x01, 0x20f4);
+ reg_w_riv(dev, 0, 0x00, 0x2089);
break;
}
if (pollreg)
switch (sd->bridge) {
case BRIDGE_SPCA504B:
reg_w_riv(dev, 0x1d, 0x00, 0);
- reg_w_riv(dev, 0, 0x2306, 0x01);
- reg_w_riv(dev, 0, 0x0d04, 0x00);
- reg_w_riv(dev, 0, 0x2000, 0x00);
- reg_w_riv(dev, 0, 0x2301, 0x13);
- reg_w_riv(dev, 0, 0x2306, 0x00);
+ reg_w_riv(dev, 0, 0x01, 0x2306);
+ reg_w_riv(dev, 0, 0x00, 0x0d04);
+ reg_w_riv(dev, 0, 0x00, 0x2000);
+ reg_w_riv(dev, 0, 0x13, 0x2301);
+ reg_w_riv(dev, 0, 0x00, 0x2306);
/* fall thru */
case BRIDGE_SPCA533:
spca504B_PollingDataReady(gspca_dev);
spca504B_WaitCmdStatus(gspca_dev);
break;
default:
- reg_w_riv(dev, 0x31, 0x04, 0);
+ reg_w_riv(dev, 0x31, 0, 0x04);
spca504B_WaitCmdStatus(gspca_dev);
spca504B_PollingDataReady(gspca_dev);
break;
goto error;
}
- mutex_unlock(&ov->lock);
+ mutex_lock(&ov->lock);
return 0;
.amux = TV,
},
},
- [SAA7134_BOARD_ASUS_EUROPA_HYBRID] = {
- .name = "Asus Europa Hybrid OEM",
- .audio_clock = 0x00187de7,
- .tuner_type = TUNER_PHILIPS_TD1316,
- .radio_type = UNSET,
- .tuner_addr = 0x61,
- .radio_addr = ADDR_UNSET,
- .tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE,
- .mpeg = SAA7134_MPEG_DVB,
- .inputs = { {
- .name = name_tv,
- .vmux = 3,
- .amux = TV,
- .tv = 1,
- }, {
- .name = name_comp1,
- .vmux = 4,
- .amux = LINE2,
- }, {
- .name = name_svideo,
- .vmux = 8,
- .amux = LINE2,
- } },
- },
};
.subvendor = PCI_VENDOR_ID_PHILIPS,
.subdevice = 0x2004,
.driver_data = SAA7134_BOARD_ZOLID_HYBRID_PCI,
- }, {
- .vendor = PCI_VENDOR_ID_PHILIPS,
- .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
- .subvendor = 0x1043,
- .subdevice = 0x4847,
- .driver_data = SAA7134_BOARD_ASUS_EUROPA_HYBRID,
}, {
/* --- boards without eeprom + subsystem ID --- */
.vendor = PCI_VENDOR_ID_PHILIPS,
/* break intentionally omitted */
case SAA7134_BOARD_VIDEOMATE_DVBT_300:
case SAA7134_BOARD_ASUS_EUROPA2_HYBRID:
- case SAA7134_BOARD_ASUS_EUROPA_HYBRID:
{
/* The Philips EUROPA based hybrid boards have the tuner
break;
case SAA7134_BOARD_PHILIPS_EUROPA:
case SAA7134_BOARD_VIDEOMATE_DVBT_300:
- case SAA7134_BOARD_ASUS_EUROPA_HYBRID:
fe0->dvb.frontend = dvb_attach(tda10046_attach,
&philips_europa_config,
&dev->i2c_adap);
#define SAA7134_BOARD_BEHOLD_X7 171
#define SAA7134_BOARD_ROVERMEDIA_LINK_PRO_FM 172
#define SAA7134_BOARD_ZOLID_HYBRID_PCI 173
-#define SAA7134_BOARD_ASUS_EUROPA_HYBRID 174
#define SAA7134_MAXBOARDS 32
#define SAA7134_INPUT_MAX 8
size = entity->processing.bControlSize;
for (i = 0; i < ARRAY_SIZE(blacklist); ++i) {
- if (!usb_match_one_id(dev->intf, &blacklist[i].id))
+ if (!usb_match_id(dev->intf, &blacklist[i].id))
continue;
if (blacklist[i].index >= 8 * size ||
if (ioc->bus_type == SPI)
num_chain *= MPT_SCSI_CAN_QUEUE;
- else if (ioc->bus_type == SAS)
- num_chain *= MPT_SAS_CAN_QUEUE;
else
num_chain *= MPT_FC_CAN_QUEUE;
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: "
"Command not in the active list! (sc=%p)\n", ioc->name,
SCpnt));
- retval = SUCCESS;
+ retval = 0;
goto out;
}
wm8350->reg_cache[WM8350_SECURITY] == WM8350_UNLOCK_KEY)
return 0;
- if ((reg >= WM8350_GPIO_FUNCTION_SELECT_1 &&
+ if ((reg == WM8350_GPIO_CONFIGURATION_I_O) ||
+ (reg >= WM8350_GPIO_FUNCTION_SELECT_1 &&
reg <= WM8350_GPIO_FUNCTION_SELECT_4) ||
(reg >= WM8350_BATTERY_CHARGER_CONTROL_1 &&
reg <= WM8350_BATTERY_CHARGER_CONTROL_3))
[ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed",
[ENCLOSURE_STATUS_UNKNOWN] = "unknown",
[ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable",
- [ENCLOSURE_STATUS_MAX] = NULL,
};
static const char *const enclosure_type [] = {
break;
}
+ req.name[req.name_len] = '\0';
err = verify_mkvol_req(ubi, &req);
if (err)
break;
}
if (bytes == 0) {
- err = ubi_wl_flush(ubi);
- if (err)
- return err;
-
err = clear_update_marker(ubi, vol, 0);
if (err)
return err;
- vol->updating = 0;
- return 0;
+ err = ubi_wl_flush(ubi);
+ if (!err)
+ vol->updating = 0;
}
vol->upd_buf = vmalloc(ubi->leb_size);
ubi_assert(vol->upd_received <= vol->upd_bytes);
if (vol->upd_received == vol->upd_bytes) {
- err = ubi_wl_flush(ubi);
- if (err)
- return err;
/* The update is finished, clear the update marker */
err = clear_update_marker(ubi, vol, vol->upd_bytes);
if (err)
return err;
- vol->updating = 0;
- err = to_write;
- vfree(vol->upd_buf);
+ err = ubi_wl_flush(ubi);
+ if (err == 0) {
+ vol->updating = 0;
+ err = to_write;
+ vfree(vol->upd_buf);
+ }
}
return err;
vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
vol->alignment = be32_to_cpu(vtbl[i].alignment);
vol->data_pad = be32_to_cpu(vtbl[i].data_pad);
- vol->upd_marker = vtbl[i].upd_marker;
vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ?
UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
vol->name_len = be16_to_cpu(vtbl[i].name_len);
#define __AT_TESTING 0x0001
#define __AT_RESETTING 0x0002
#define __AT_DOWN 0x0003
- u8 work_event;
-#define ATL1C_WORK_EVENT_RESET 0x01
-#define ATL1C_WORK_EVENT_LINK_CHANGE 0x02
u32 msg_enable;
bool have_msi;
spinlock_t tx_lock;
atomic_t irq_sem;
- struct work_struct common_task;
+ struct work_struct reset_task;
+ struct work_struct link_chg_task;
struct timer_list watchdog_timer;
struct timer_list phy_config_timer;
void atl1c_reinit_locked(struct atl1c_adapter *adapter)
{
+
WARN_ON(in_interrupt());
atl1c_down(adapter);
atl1c_up(adapter);
clear_bit(__AT_RESETTING, &adapter->flags);
}
+static void atl1c_reset_task(struct work_struct *work)
+{
+ struct atl1c_adapter *adapter;
+ struct net_device *netdev;
+
+ adapter = container_of(work, struct atl1c_adapter, reset_task);
+ netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+ atl1c_down(adapter);
+ atl1c_up(adapter);
+ netif_device_attach(netdev);
+}
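
Both task functions recover their adapter with container_of(), the standard idiom for work handlers: the work_struct is embedded in the driver's private struct, and the callback walks back from the member pointer to its container. A freestanding sketch of the idiom, with invented types standing in for the kernel's workqueue machinery:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Minimal stand-ins for the kernel's workqueue types. */
struct work_struct { void (*func)(struct work_struct *); };

struct adapter {
	int link_up;
	struct work_struct reset_task;     /* embedded, not a pointer */
};

static void reset_task_fn(struct work_struct *work)
{
	/* Walk back from the embedded member to its container. */
	struct adapter *ad = container_of(work, struct adapter, reset_task);

	printf("resetting adapter, link_up=%d\n", ad->link_up);
}

int main(void)
{
	struct adapter ad = { .link_up = 1,
			      .reset_task = { .func = reset_task_fn } };

	ad.reset_task.func(&ad.reset_task);    /* what schedule_work leads to */
	return 0;
}
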
+
static void atl1c_check_link_status(struct atl1c_adapter *adapter)
{
struct atl1c_hw *hw = &adapter->hw;
}
}
+/*
+ * atl1c_link_chg_task - deal with a link change event out of interrupt context
+ * @work: work_struct embedded in the atl1c adapter
+ */
+static void atl1c_link_chg_task(struct work_struct *work)
+{
+ struct atl1c_adapter *adapter;
+
+ adapter = container_of(work, struct atl1c_adapter, link_chg_task);
+ atl1c_check_link_status(adapter);
+}
+
static void atl1c_link_chg_event(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
adapter->link_speed = SPEED_0;
}
}
-
- adapter->work_event |= ATL1C_WORK_EVENT_LINK_CHANGE;
- schedule_work(&adapter->common_task);
-}
-
-static void atl1c_common_task(struct work_struct *work)
-{
- struct atl1c_adapter *adapter;
- struct net_device *netdev;
-
- adapter = container_of(work, struct atl1c_adapter, common_task);
- netdev = adapter->netdev;
-
- if (adapter->work_event & ATL1C_WORK_EVENT_RESET) {
- netif_device_detach(netdev);
- atl1c_down(adapter);
- atl1c_up(adapter);
- netif_device_attach(netdev);
- return;
- }
-
- if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE)
- atl1c_check_link_status(adapter);
-
- return;
+ schedule_work(&adapter->link_chg_task);
}
-
static void atl1c_del_timer(struct atl1c_adapter *adapter)
{
del_timer_sync(&adapter->phy_config_timer);
}
+static void atl1c_cancel_work(struct atl1c_adapter *adapter)
+{
+ cancel_work_sync(&adapter->reset_task);
+ cancel_work_sync(&adapter->link_chg_task);
+}
/*
* atl1c_tx_timeout - Respond to a Tx Hang
struct atl1c_adapter *adapter = netdev_priv(netdev);
/* Do the reset outside of interrupt context */
- adapter->work_event |= ATL1C_WORK_EVENT_RESET;
- schedule_work(&adapter->common_task);
+ schedule_work(&adapter->reset_task);
}
/*
/* reset MAC */
hw->intr_mask &= ~ISR_ERROR;
AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
- adapter->work_event |= ATL1C_WORK_EVENT_RESET;
- schedule_work(&adapter->common_task);
+ schedule_work(&adapter->reset_task);
break;
}
struct net_device *netdev = adapter->netdev;
atl1c_del_timer(adapter);
- adapter->work_event = 0; /* clear all event */
+ atl1c_cancel_work(adapter);
+
/* signal that we're down so the interrupt handler does not
* reschedule our watchdog timer */
set_bit(__AT_DOWN, &adapter->flags);
adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);
atl1c_hw_set_mac_addr(&adapter->hw);
- INIT_WORK(&adapter->common_task, atl1c_common_task);
- adapter->work_event = 0;
+ INIT_WORK(&adapter->reset_task, atl1c_reset_task);
+ INIT_WORK(&adapter->link_chg_task, atl1c_link_chg_task);
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "register netdevice failed\n");
}
return 0;
}
+
+ if (offload_type & SKB_GSO_TCPV6) {
+ real_len = (((unsigned char *)ipv6_hdr(skb) - skb->data)
+ + ntohs(ipv6_hdr(skb)->payload_len));
+ if (real_len < skb->len)
+ pskb_trim(skb, real_len);
+
+			/* check for a zero-length payload */
+ hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ if (unlikely(skb->len == hdr_len)) {
+				/* header-only packet: checksum offload is enough */
+ dev_warn(&pdev->dev,
+ "IPV6 tso with zero data??\n");
+ goto check_sum;
+ } else {
+ tcp_hdr(skb)->check = ~csum_ipv6_magic(
+ &ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ tpd->word3 |= 1 << TPD_IP_VERSION_SHIFT;
+ hdr_len >>= 1;
+ tpd->word3 |= (hdr_len & TPD_V6_IPHLLO_MASK) <<
+ TPD_V6_IPHLLO_SHIFT;
+ tpd->word3 |= ((hdr_len >> 3) &
+ TPD_V6_IPHLHI_MASK) <<
+ TPD_V6_IPHLHI_SHIFT;
+ tpd->word3 |= (tcp_hdrlen(skb) >> 2 &
+ TPD_TCPHDRLEN_MASK) <<
+ TPD_TCPHDRLEN_SHIFT;
+ tpd->word3 |= ((skb_shinfo(skb)->gso_size) &
+ TPD_MSS_MASK) << TPD_MSS_SHIFT;
+ tpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT;
+ }
+ }
+ return 0;
}
check_sum:
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
netdev->features |= NETIF_F_LLTX;
netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
return 0;
}
for (k = 0; k< ethaddr_bytes; k++) {
ppattern[offset + magicsync +
(j * ETH_ALEN) + k] = macaddr[k];
- set_bit(len++, (unsigned long *) pmask);
+ len++;
+ set_bit(len, (unsigned long *) pmask);
}
}
return len - 1;
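
The loop above builds a per-byte significance mask for the wake-on-LAN pattern: bit i tells the NIC that byte i of the pattern must match the incoming frame. A minimal sketch of that idea with invented helpers; note that the two orderings in the hunk, set_bit(len++, ...) versus len++; set_bit(len, ...), differ by exactly one bit position, and the sketch follows the set-then-advance order:

#include <stdio.h>
#include <string.h>

#define PATTERN_LEN 32

/* Per-byte significance mask: bit i set => byte i of the pattern
 * must match the incoming frame for wake-on-LAN to fire. */
static void mask_set(unsigned char *mask, int bit)
{
	mask[bit / 8] |= 1u << (bit % 8);
}

int main(void)
{
	unsigned char pattern[PATTERN_LEN], mask[PATTERN_LEN / 8];
	const unsigned char mac[6] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };
	int len = 0, k;

	memset(pattern, 0, sizeof(pattern));
	memset(mask, 0, sizeof(mask));

	/* Mark each MAC byte significant as it is copied in: set the
	 * bit at the current index, then advance. */
	for (k = 0; k < 6; k++) {
		pattern[k] = mac[k];
		mask_set(mask, len++);
	}
	printf("mask[0] = 0x%02x (low 6 bits set)\n", mask[0]);
	return 0;
}
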
drvinfo->n_stats = BCM_ENET_STATS_LEN;
}
-static int bcm_enet_get_sset_count(struct net_device *netdev,
- int string_set)
+static int bcm_enet_get_stats_count(struct net_device *netdev)
{
- switch (string_set) {
- case ETH_SS_STATS:
- return BCM_ENET_STATS_LEN;
- default:
- return -EINVAL;
- }
+ return BCM_ENET_STATS_LEN;
}
static void bcm_enet_get_strings(struct net_device *netdev,
static struct ethtool_ops bcm_enet_ethtool_ops = {
.get_strings = bcm_enet_get_strings,
- .get_sset_count = bcm_enet_get_sset_count,
+ .get_stats_count = bcm_enet_get_stats_count,
.get_ethtool_stats = bcm_enet_get_ethtool_stats,
.get_settings = bcm_enet_get_settings,
.set_settings = bcm_enet_set_settings,
#define DRV_VER "2.101.205"
#define DRV_NAME "be2net"
#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
-#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
#define OC_NAME "Emulex OneConnect 10Gbps NIC"
-#define OC_NAME1 "Emulex OneConnect 10Gbps NIC (be3)"
#define DRV_DESC BE_NAME "Driver"
#define BE_VENDOR_ID 0x19a2
#define BE_DEVICE_ID1 0x211
-#define BE_DEVICE_ID2 0x221
#define OC_DEVICE_ID1 0x700
#define OC_DEVICE_ID2 0x701
-#define OC_DEVICE_ID3 0x710
static inline char *nic_name(struct pci_dev *pdev)
{
- switch (pdev->device) {
- case OC_DEVICE_ID1:
- case OC_DEVICE_ID2:
+ if (pdev->device == OC_DEVICE_ID1 || pdev->device == OC_DEVICE_ID2)
return OC_NAME;
- case OC_DEVICE_ID3:
- return OC_NAME1;
- case BE_DEVICE_ID2:
- return BE3_NAME;
- default:
+ else
return BE_NAME;
- }
}
/* Number of bytes of an RX frame that are copied to skb->data */
u32 cap;
u32 rx_fc; /* Rx flow control */
u32 tx_fc; /* Tx flow control */
- u8 generation; /* BladeEngine ASIC generation */
};
-/* BladeEngine Generation numbers */
-#define BE_GEN2 2
-#define BE_GEN3 3
-
extern const struct ethtool_ops be_ethtool_ops;
#define drvr_stats(adapter) (&adapter->stats.drvr_stats)
u8 domain; /* dword 0 */
u32 timeout; /* dword 1 */
u32 request_length; /* dword 2 */
- u8 version; /* dword 3 */
- u8 rsvd[3]; /* dword 3 */
+ u32 rsvd; /* dword 3 */
};
#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
- { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
- { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
static int be_map_pci_bars(struct be_adapter *adapter)
{
u8 __iomem *addr;
- int pcicfg_reg;
addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
pci_resource_len(adapter->pdev, 2));
goto pci_map_err;
adapter->db = addr;
- if (adapter->generation == BE_GEN2)
- pcicfg_reg = 1;
- else
- pcicfg_reg = 0;
-
- addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg),
- pci_resource_len(adapter->pdev, pcicfg_reg));
+ addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1),
+ pci_resource_len(adapter->pdev, 1));
if (addr == NULL)
goto pci_map_err;
adapter->pcicfg = addr;
cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
if (cmd->va == NULL)
return -1;
- memset(cmd->va, 0, cmd->size);
return 0;
}
goto rel_reg;
}
adapter = netdev_priv(netdev);
-
- switch (pdev->device) {
- case BE_DEVICE_ID1:
- case OC_DEVICE_ID1:
- adapter->generation = BE_GEN2;
- break;
- case BE_DEVICE_ID2:
- case OC_DEVICE_ID2:
- adapter->generation = BE_GEN3;
- break;
- default:
- adapter->generation = 0;
- }
-
adapter->pdev = pdev;
pci_set_drvdata(pdev, adapter);
adapter->netdev = netdev;
&nic->cbs_dma_addr);
if (!nic->cbs)
return -ENOMEM;
- memset(nic->cbs, 0, count * sizeof(struct cb));
for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
cb->link = cpu_to_le32(nic->cbs_dma_addr +
((i+1) % count) * sizeof(struct cb));
+ cb->skb = NULL;
}
nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
/* for ioport free */
int bars;
int need_ioport;
-
- bool discarding;
};
enum e1000_state_t {
rctl &= ~E1000_RCTL_SZ_4096;
rctl |= E1000_RCTL_BSEX;
switch (adapter->rx_buffer_len) {
+ case E1000_RXBUFFER_256:
+ rctl |= E1000_RCTL_SZ_256;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
+ case E1000_RXBUFFER_512:
+ rctl |= E1000_RCTL_SZ_512;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
+ case E1000_RXBUFFER_1024:
+ rctl |= E1000_RCTL_SZ_1024;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
case E1000_RXBUFFER_2048:
default:
rctl |= E1000_RCTL_SZ_2048;
* however with the new *_jumbo_rx* routines, jumbo receives will use
* fragmented skbs */
- if (max_frame <= E1000_RXBUFFER_2048)
+ if (max_frame <= E1000_RXBUFFER_256)
+ adapter->rx_buffer_len = E1000_RXBUFFER_256;
+ else if (max_frame <= E1000_RXBUFFER_512)
+ adapter->rx_buffer_len = E1000_RXBUFFER_512;
+ else if (max_frame <= E1000_RXBUFFER_1024)
+ adapter->rx_buffer_len = E1000_RXBUFFER_1024;
+ else if (max_frame <= E1000_RXBUFFER_2048)
adapter->rx_buffer_len = E1000_RXBUFFER_2048;
else
#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
length = le16_to_cpu(rx_desc->length);
/* !EOP means multiple descriptors were used to store a single
- * packet, if that's the case we need to toss it. In fact, we
- * need to toss every packet with the EOP bit clear and the next
- * frame that _does_ have the EOP bit set, as it is by
- * definition only a frame fragment
- */
- if (unlikely(!(status & E1000_RXD_STAT_EOP)))
- adapter->discarding = true;
-
- if (adapter->discarding) {
+	 * packet; also make sure the frame isn't just the CRC */
+ if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) {
/* All receives must fit into a single buffer */
E1000_DBG("%s: Receive packet consumed multiple"
" buffers\n", netdev->name);
/* recycle */
buffer_info->skb = skb;
- if (status & E1000_RXD_STAT_EOP)
- adapter->discarding = false;
goto next_desc;
}
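
The comments in this hunk describe the invariant behind the discarding flag being removed: a descriptor without EOP is only a fragment, and every descriptor up to and including the next EOP belongs to the same unusable frame. A toy sketch of that discard state machine over an array of fake descriptors (the descriptor layout is invented for the demo):

#include <stdbool.h>
#include <stdio.h>

struct rx_desc { bool eop; int len; };   /* illustrative descriptor */

int main(void)
{
	/* frame 1 spans three descriptors, frame 2 fits in one */
	struct rx_desc ring[] = {
		{ false, 2048 }, { false, 2048 }, { true, 100 },
		{ true, 60 },
	};
	bool discarding = false;
	int i;

	for (i = 0; i < 4; i++) {
		if (!ring[i].eop)
			discarding = true;   /* fragment: start tossing */
		if (discarding) {
			printf("desc %d: dropped\n", i);
			if (ring[i].eop)
				discarding = false;  /* frame complete */
			continue;
		}
		printf("desc %d: delivered (%d bytes)\n", i, ring[i].len);
	}
	return 0;
}
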
/* CRC Stripping defines */
#define FLAG2_CRC_STRIPPING (1 << 0)
#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
-#define FLAG2_IS_DISCARDING (1 << 2)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
length = le16_to_cpu(rx_desc->length);
- /*
- * !EOP means multiple descriptors were used to store a single
- * packet, if that's the case we need to toss it. In fact, we
- * need to toss every packet with the EOP bit clear and the
- * next frame that _does_ have the EOP bit set, as it is by
- * definition only a frame fragment
- */
- if (unlikely(!(status & E1000_RXD_STAT_EOP)))
- adapter->flags2 |= FLAG2_IS_DISCARDING;
-
- if (adapter->flags2 & FLAG2_IS_DISCARDING) {
+ /* !EOP means multiple descriptors were used to store a single
+	 * packet; also make sure the frame isn't just the CRC */
+ if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
/* All receives must fit into a single buffer */
e_dbg("%s: Receive packet consumed multiple buffers\n",
netdev->name);
/* recycle */
buffer_info->skb = skb;
- if (status & E1000_RXD_STAT_EOP)
- adapter->flags2 &= ~FLAG2_IS_DISCARDING;
goto next_desc;
}
PCI_DMA_FROMDEVICE);
buffer_info->dma = 0;
- /* see !EOP comment in other rx routine */
- if (!(staterr & E1000_RXD_STAT_EOP))
- adapter->flags2 |= FLAG2_IS_DISCARDING;
-
- if (adapter->flags2 & FLAG2_IS_DISCARDING) {
+ if (!(staterr & E1000_RXD_STAT_EOP)) {
e_dbg("%s: Packet Split buffers didn't pick up the "
"full packet\n", netdev->name);
dev_kfree_skb_irq(skb);
- if (staterr & E1000_RXD_STAT_EOP)
- adapter->flags2 &= ~FLAG2_IS_DISCARDING;
goto next_desc;
}
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
- adapter->flags2 &= ~FLAG2_IS_DISCARDING;
writel(0, adapter->hw.hw_addr + rx_ring->head);
writel(0, adapter->hw.hw_addr + rx_ring->tail);
rctl &= ~E1000_RCTL_SZ_4096;
rctl |= E1000_RCTL_BSEX;
switch (adapter->rx_buffer_len) {
+ case 256:
+ rctl |= E1000_RCTL_SZ_256;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
+ case 512:
+ rctl |= E1000_RCTL_SZ_512;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
+ case 1024:
+ rctl |= E1000_RCTL_SZ_1024;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
case 2048:
default:
rctl |= E1000_RCTL_SZ_2048;
* fragmented skbs
*/
- if (max_frame <= 2048)
+ if (max_frame <= 256)
+ adapter->rx_buffer_len = 256;
+ else if (max_frame <= 512)
+ adapter->rx_buffer_len = 512;
+ else if (max_frame <= 1024)
+ adapter->rx_buffer_len = 1024;
+ else if (max_frame <= 2048)
adapter->rx_buffer_len = 2048;
else
adapter->rx_buffer_len = 4096;
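
Both the e1000 and e1000e hunks extend the same ladder: the MTU-derived maximum frame size is rounded up to the next receive buffer size the hardware supports. The selection logic, extracted into a standalone sketch (16384 stands in for the drivers' page-size-gated jumbo case):

#include <stdio.h>

/* Round a frame size up to the next hardware-supported buffer size. */
static int rx_buffer_len(int max_frame)
{
	static const int sizes[] = { 256, 512, 1024, 2048, 4096 };
	int i;

	for (i = 0; i < 5; i++)
		if (max_frame <= sizes[i])
			return sizes[i];
	return 16384;                      /* jumbo fallback (page-sized) */
}

int main(void)
{
	int frames[] = { 300, 1522, 4000, 9000 };

	for (int i = 0; i < 4; i++)
		printf("%d -> %d\n", frames[i], rx_buffer_len(frames[i]));
	return 0;
}
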
/* Initialize the port and set the max framesize. */
status = qdev->nic_ops->port_initialize(qdev);
- if (status)
- QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
+ if (status) {
+ QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
+ return status;
+ }
/* Set up the MAC address and frame routing filter. */
status = ql_cam_route_initialize(qdev);
struct sockaddr *addr = p;
int status;
+ if (netif_running(ndev))
+ return -EBUSY;
+
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
struct net_device *ndev, int cards_found)
{
struct ql_adapter *qdev = netdev_priv(ndev);
- int err = 0;
+ int pos, err = 0;
+ u16 val16;
memset((void *)qdev, 0, sizeof(*qdev));
err = pci_enable_device(pdev);
qdev->ndev = ndev;
qdev->pdev = pdev;
pci_set_drvdata(pdev, ndev);
-
- /* Set PCIe read request size */
- err = pcie_set_readrq(pdev, 4096);
- if (err) {
- dev_err(&pdev->dev, "Set readrq failed.\n");
- goto err_out;
+ pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ if (pos <= 0) {
+ dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
+ "aborting.\n");
+ return pos;
+ } else {
+ pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
+ val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
+ val16 |= (PCI_EXP_DEVCTL_CERE |
+ PCI_EXP_DEVCTL_NFERE |
+ PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
+ pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
}
err = pci_request_regions(pdev, DRV_NAME);
ql_aen_lost(qdev, mbcp);
break;
- case AEN_DCBX_CHG:
- /* Need to support AEN 8110 */
- break;
default:
QPRINTK(qdev, DRV, ERR,
"Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
tx_queue->efx->type->txd_ring_mask];
efx_tsoh_free(tx_queue, buffer);
EFX_BUG_ON_PARANOID(buffer->skb);
+ buffer->len = 0;
+ buffer->continuation = true;
if (buffer->unmap_len) {
unmap_addr = (buffer->dma_addr + buffer->len -
buffer->unmap_len);
PCI_DMA_TODEVICE);
buffer->unmap_len = 0;
}
- buffer->len = 0;
- buffer->continuation = true;
}
}
sky2->tx_cons = idx;
smp_mb();
- /* Wake unless it's detached, and called e.g. from sky2_down() */
- if (tx_avail(sky2) > MAX_SKB_TX_LE + 4 && netif_device_present(dev))
+ if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
netif_wake_queue(dev);
}
if (retval) {
printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
FIRMWARE_RX);
- goto out_init;
+ return retval;
}
if (fw_rx->size % 4) {
printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
release_firmware(fw_tx);
out_rx:
release_firmware(fw_rx);
-out_init:
- if (retval)
- netdev_close(dev);
return retval;
}
dbg("%02X:", netdev->dev_addr[i]);
dbg("%02X\n", netdev->dev_addr[i]);
/* Set the IDR registers. */
- set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr);
+ set_registers(dev, IDR, sizeof(netdev->dev_addr), netdev->dev_addr);
#ifdef EEPROM_WRITE
{
u8 cr;
return 0;
}
-static int bios_warned;
-
int __init check_zero_address(void)
{
struct acpi_table_dmar *dmar;
}
if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
- void __iomem *addr;
- u64 cap, ecap;
-
drhd = (void *)entry_header;
if (!drhd->address) {
/* Promote an attitude of violence to a BIOS engineer today */
dmi_get_system_info(DMI_BIOS_VENDOR),
dmi_get_system_info(DMI_BIOS_VERSION),
dmi_get_system_info(DMI_PRODUCT_VERSION));
- bios_warned = 1;
- goto failed;
- }
-
- addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
- if (!addr ) {
- printk("IOMMU: can't validate: %llx\n", drhd->address);
- goto failed;
- }
- cap = dmar_readq(addr + DMAR_CAP_REG);
- ecap = dmar_readq(addr + DMAR_ECAP_REG);
- early_iounmap(addr, VTD_PAGE_SIZE);
- if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
- /* Promote an attitude of violence to a BIOS engineer today */
- WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- drhd->address,
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- bios_warned = 1;
- goto failed;
+#ifdef CONFIG_DMAR
+ dmar_disabled = 1;
+#endif
+ return 0;
}
+ break;
}
entry_header = ((void *)entry_header + entry_header->length);
}
return 1;
-
-failed:
-#ifdef CONFIG_DMAR
- dmar_disabled = 1;
-#endif
- return 0;
}
void __init detect_intel_iommu(void)
int agaw = 0;
int msagaw = 0;
- if (!drhd->reg_base_addr) {
- if (!bios_warned) {
- WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- bios_warned = 1;
- }
- return -EINVAL;
- }
-
iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
if (!iommu)
return -ENOMEM;
iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
- if (!bios_warned) {
- /* Promote an attitude of violence to a BIOS engineer today */
- WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- drhd->reg_base_addr,
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- bios_warned = 1;
- }
+ /* Promote an attitude of violence to a BIOS engineer today */
+ WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+ drhd->reg_base_addr,
+ dmi_get_system_info(DMI_BIOS_VENDOR),
+ dmi_get_system_info(DMI_BIOS_VERSION),
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
goto err_unmap;
}
/* Skip top levels of page tables for
* iommu which has less agaw than default.
- * Unnecessary for PT mode.
*/
- if (translation != CONTEXT_TT_PASS_THROUGH) {
- for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
- pgd = phys_to_virt(dma_pte_addr(pgd));
- if (!dma_pte_present(pgd)) {
- spin_unlock_irqrestore(&iommu->lock, flags);
- return -ENOMEM;
- }
+ for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
+ pgd = phys_to_virt(dma_pte_addr(pgd));
+ if (!dma_pte_present(pgd)) {
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ return -ENOMEM;
}
}
}
"IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
pci_name(pdev), start, end);
- if (end < start) {
- WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- ret = -EIO;
- goto error;
- }
-
if (end >> agaw_to_width(domain->agaw)) {
WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
struct pci_dev *pdev = to_pci_dev(dev);
struct dmar_domain *domain;
- if (iommu_no_mapping(dev))
- return 0;
-
domain = find_domain(pdev);
if (!domain)
return 0;
return 1;
}
-void __weak pci_fixup_cardbus(struct pci_bus *bus)
-{
-}
-EXPORT_SYMBOL(pci_fixup_cardbus);
-
static int __init pci_setup(char *str)
{
while (str) {
if (ret)
goto out_put;
- if (find_aer_device(rpdev, &edev)) {
- if (!get_service_data(edev)) {
- printk(KERN_WARNING "AER service is not initialized\n");
- ret = -EINVAL;
- goto out_put;
- }
+ if (find_aer_device(rpdev, &edev))
aer_irq(-1, edev);
- }
else
ret = -EINVAL;
out_put:
struct pci_dev *bridge = bus->self;
struct pci_bus_region region;
u32 l, bu, lu, io_upper16;
+ int pref_mem64;
if (pci_is_enabled(bridge))
return;
pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);
/* Set up PREF base/limit. */
+ pref_mem64 = 0;
bu = lu = 0;
pcibios_resource_to_bus(bridge, ®ion, bus->resource[2]);
if (bus->resource[2]->flags & IORESOURCE_PREFETCH) {
l = (region.start >> 16) & 0xfff0;
l |= region.end & 0xfff00000;
if (bus->resource[2]->flags & IORESOURCE_MEM_64) {
+ pref_mem64 = 1;
bu = upper_32_bits(region.start);
lu = upper_32_bits(region.end);
width = 16;
}
pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
- /* Set the upper 32 bits of PREF base & limit. */
- pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
- pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
+ if (pref_mem64) {
+ /* Set the upper 32 bits of PREF base & limit. */
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
+ pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
+ }
pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}
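
The bu/lu values written to PCI_PREF_BASE_UPPER32 and PCI_PREF_LIMIT_UPPER32 are simply the high halves of the 64-bit prefetchable window. A standalone illustration of the split the kernel's upper_32_bits()/lower_32_bits() helpers perform (the sample address is made up):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same splits the kernel's upper_32_bits()/lower_32_bits() perform. */
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }
static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }

int main(void)
{
	uint64_t base = 0x0000000c80000000ULL;   /* a >4GiB window start */

	/* The upper half goes into PREF_BASE_UPPER32; the lower half
	 * feeds the classic 32-bit prefetchable base register. */
	printf("upper = 0x%08" PRIx32 "\n", upper_32_bits(base));
	printf("lower = 0x%08" PRIx32 "\n", lower_32_bits(base));
	return 0;
}
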
unsigned int max, pass;
s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0));
- pci_fixup_cardbus(bus);
+// pcibios_fixup_bus(bus);
max = bus->secondary;
for (pass = 0; pass < 2; pass++)
*/
#undef START_IN_KERNEL_MODE
-#define DRV_VER "0.5.20"
+#define DRV_VER "0.5.18"
/*
* According to the Atom N270 datasheet,
MODULE_PARM_DESC(force_product, "Force BIOS product and omit BIOS check");
/*
- * cmd_off: to switch the fan completely off
- * chk_off: to check if the fan is off
+ * cmd_off: to switch the fan completely off / to check if the fan is off
* cmd_auto: to set the BIOS in control of the fan. The BIOS regulates then
* the fan speed depending on the temperature
*/
struct fancmd {
u8 cmd_off;
- u8 chk_off;
u8 cmd_auto;
};
/* Register addresses and values for different BIOS versions */
static const struct bios_settings_t bios_tbl[] = {
/* AOA110 */
- {"Acer", "AOA110", "v0.3109", 0x55, 0x58, {0x1f, 0x1f, 0x00} },
- {"Acer", "AOA110", "v0.3114", 0x55, 0x58, {0x1f, 0x1f, 0x00} },
- {"Acer", "AOA110", "v0.3301", 0x55, 0x58, {0xaf, 0xaf, 0x00} },
- {"Acer", "AOA110", "v0.3304", 0x55, 0x58, {0xaf, 0xaf, 0x00} },
- {"Acer", "AOA110", "v0.3305", 0x55, 0x58, {0xaf, 0xaf, 0x00} },
- {"Acer", "AOA110", "v0.3307", 0x55, 0x58, {0xaf, 0xaf, 0x00} },
- {"Acer", "AOA110", "v0.3308", 0x55, 0x58, {0x21, 0x21, 0x00} },
- {"Acer", "AOA110", "v0.3309", 0x55, 0x58, {0x21, 0x21, 0x00} },
- {"Acer", "AOA110", "v0.3310", 0x55, 0x58, {0x21, 0x21, 0x00} },
+ {"Acer", "AOA110", "v0.3109", 0x55, 0x58, {0x1f, 0x00} },
+ {"Acer", "AOA110", "v0.3114", 0x55, 0x58, {0x1f, 0x00} },
+ {"Acer", "AOA110", "v0.3301", 0x55, 0x58, {0xaf, 0x00} },
+ {"Acer", "AOA110", "v0.3304", 0x55, 0x58, {0xaf, 0x00} },
+ {"Acer", "AOA110", "v0.3305", 0x55, 0x58, {0xaf, 0x00} },
+ {"Acer", "AOA110", "v0.3307", 0x55, 0x58, {0xaf, 0x00} },
+ {"Acer", "AOA110", "v0.3308", 0x55, 0x58, {0x21, 0x00} },
+ {"Acer", "AOA110", "v0.3309", 0x55, 0x58, {0x21, 0x00} },
+ {"Acer", "AOA110", "v0.3310", 0x55, 0x58, {0x21, 0x00} },
/* AOA150 */
- {"Acer", "AOA150", "v0.3114", 0x55, 0x58, {0x20, 0x20, 0x00} },
- {"Acer", "AOA150", "v0.3301", 0x55, 0x58, {0x20, 0x20, 0x00} },
- {"Acer", "AOA150", "v0.3304", 0x55, 0x58, {0x20, 0x20, 0x00} },
- {"Acer", "AOA150", "v0.3305", 0x55, 0x58, {0x20, 0x20, 0x00} },
- {"Acer", "AOA150", "v0.3307", 0x55, 0x58, {0x20, 0x20, 0x00} },
- {"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x20, 0x00} },
- {"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x20, 0x00} },
- {"Acer", "AOA150", "v0.3310", 0x55, 0x58, {0x20, 0x20, 0x00} },
- /* Acer 1410 */
- {"Acer", "Aspire 1410", "v0.3120", 0x55, 0x58, {0x9e, 0x9e, 0x00} },
+ {"Acer", "AOA150", "v0.3114", 0x55, 0x58, {0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3301", 0x55, 0x58, {0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3304", 0x55, 0x58, {0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3305", 0x55, 0x58, {0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3307", 0x55, 0x58, {0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3310", 0x55, 0x58, {0x20, 0x00} },
/* special BIOS / other */
- {"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x21, 0x00} },
- {"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x20, 0x00} },
- {"Gateway ", "LT31 ", "v1.3103 ", 0x55, 0x58,
- {0x10, 0x0f, 0x00} },
- {"Gateway ", "LT31 ", "v1.3201 ", 0x55, 0x58,
- {0x10, 0x0f, 0x00} },
- {"Gateway ", "LT31 ", "v1.3302 ", 0x55, 0x58,
- {0x10, 0x0f, 0x00} },
- {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x21, 0x00} },
- {"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x20, 0x00} },
- {"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x21, 0x00} },
- {"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x20, 0x00} },
+ {"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x00} },
+ {"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x00} },
+ {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00} },
+ {"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x00} },
+ {"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} },
/* pewpew-terminator */
- {"", "", "", 0, 0, {0, 0, 0} }
+ {"", "", "", 0, 0, {0, 0} }
};
static const struct bios_settings_t *bios_cfg __read_mostly;
if (ec_read(bios_cfg->fanreg, &fan))
return -EINVAL;
- if (fan != bios_cfg->cmd.chk_off)
+ if (fan != bios_cfg->cmd.cmd_off)
*state = ACERHDF_FAN_AUTO;
else
*state = ACERHDF_FAN_OFF;
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Peter Feuerer");
MODULE_DESCRIPTION("Aspire One temperature and fan driver");
-MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:");
-MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:");
-MODULE_ALIAS("dmi:*:*Packard Bell*:pnAOA*:");
-MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOA*:");
+MODULE_ALIAS("dmi:*:*Acer*:*:");
+MODULE_ALIAS("dmi:*:*Gateway*:*:");
+MODULE_ALIAS("dmi:*:*Packard Bell*:*:");
module_init(acerhdf_init);
module_exit(acerhdf_exit);
*/
static const struct acpi_device_id asus_device_ids[] = {
{"ATK0100", 0},
- {"ATK0101", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, asus_device_ids);
enum { KE_KEY, KE_END };
static struct key_entry asus_keymap[] = {
- {KE_KEY, 0x02, KEY_SCREENLOCK},
- {KE_KEY, 0x05, KEY_WLAN},
- {KE_KEY, 0x08, BTN_TOUCH},
- {KE_KEY, 0x17, KEY_ZOOM},
- {KE_KEY, 0x1f, KEY_BATTERY},
{KE_KEY, 0x30, KEY_VOLUMEUP},
{KE_KEY, 0x31, KEY_VOLUMEDOWN},
{KE_KEY, 0x32, KEY_MUTE},
{KE_KEY, 0x5F, KEY_WLAN},
{KE_KEY, 0x60, KEY_SWITCHVIDEOMODE},
{KE_KEY, 0x61, KEY_SWITCHVIDEOMODE},
- {KE_KEY, 0x62, KEY_SWITCHVIDEOMODE},
- {KE_KEY, 0x63, KEY_SWITCHVIDEOMODE},
{KE_KEY, 0x6B, BTN_TOUCH}, /* Lock Mouse */
{KE_KEY, 0x82, KEY_CAMERA},
{KE_KEY, 0x8A, KEY_PROG1},
hotk->ledd_status = 0xFFF;
/* Set initial values of light sensor and level */
- hotk->light_switch = 0; /* Default to light sensor disabled */
- hotk->light_level = 5; /* level 5 for sensor sensitivity */
+ hotk->light_switch = 1; /* Default to light sensor disabled */
+ hotk->light_level = 0; /* level 5 for sensor sensitivity */
if (ls_switch_handle)
set_light_sens_switch(hotk->light_switch);
struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
static struct key_entry *key;
union acpi_object *obj;
- acpi_status status;
- status = wmi_get_event_data(value, &response);
- if (status != AE_OK) {
- printk(KERN_INFO "dell-wmi: bad event status 0x%x\n", status);
- return;
- }
+ wmi_get_event_data(value, &response);
obj = (union acpi_object *)response.pointer;
printk(KERN_INFO "dell-wmi: Unknown key %x pressed\n",
buffer[1] & 0xFFFF);
}
- kfree(obj);
}
static int __init dell_wmi_input_setup(void)
struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
static struct key_entry *key;
union acpi_object *obj;
- acpi_status status;
- status = wmi_get_event_data(value, &response);
- if (status != AE_OK) {
- printk(KERN_INFO "hp-wmi: bad event status 0x%x\n", status);
- return;
- }
+ wmi_get_event_data(value, &response);
obj = (union acpi_object *)response.pointer;
eventcode);
} else
printk(KERN_INFO "HP WMI: Unknown response received\n");
-
- kfree(obj);
}
static int __init hp_wmi_input_setup(void)
#define TPACPI_RFK_BLUETOOTH_SW_NAME "tpacpi_bluetooth_sw"
+static void bluetooth_suspend(pm_message_t state)
+{
+ /* Try to make sure radio will resume powered off */
+ if (!acpi_evalf(NULL, NULL, "\\BLTH", "vd",
+ TP_ACPI_BLTH_PWR_OFF_ON_RESUME))
+ vdbg_printk(TPACPI_DBG_RFKILL,
+ "bluetooth power down on resume request failed\n");
+}
+
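The new suspend hook above relies on thinkpad-acpi's acpi_evalf() helper; as a rough equivalent in plain ACPICA terms (a sketch only, error handling omitted), the "vd" format string means "evaluate with one integer argument, discard the result":

	union acpi_object arg = {
		.integer = {
			.type = ACPI_TYPE_INTEGER,
			.value = TP_ACPI_BLTH_PWR_OFF_ON_RESUME,
		},
	};
	struct acpi_object_list args = { .count = 1, .pointer = &arg };

	/* Evaluate \BLTH with one integer argument; result ignored. */
	acpi_evaluate_object(NULL, "\\BLTH", &args, NULL);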
static int bluetooth_get_status(void)
{
int status;
#endif
/* We make sure to keep TP_ACPI_BLUETOOTH_RESUMECTRL off */
- status = TP_ACPI_BLUETOOTH_RESUMECTRL;
if (state == TPACPI_RFK_RADIO_ON)
- status |= TP_ACPI_BLUETOOTH_RADIOSSW;
+ status = TP_ACPI_BLUETOOTH_RADIOSSW;
+ else
+ status = 0;
if (!acpi_evalf(hkey_handle, NULL, "SBDC", "vd", status))
return -EIO;
.read = bluetooth_read,
.write = bluetooth_write,
.exit = bluetooth_exit,
+ .suspend = bluetooth_suspend,
.shutdown = bluetooth_shutdown,
};
#define TPACPI_RFK_WWAN_SW_NAME "tpacpi_wwan_sw"
+static void wan_suspend(pm_message_t state)
+{
+ /* Try to make sure radio will resume powered off */
+ if (!acpi_evalf(NULL, NULL, "\\WGSV", "qvd",
+ TP_ACPI_WGSV_PWR_OFF_ON_RESUME))
+ vdbg_printk(TPACPI_DBG_RFKILL,
+ "WWAN power down on resume request failed\n");
+}
+
static int wan_get_status(void)
{
int status;
}
#endif
- /* We make sure to set TP_ACPI_WANCARD_RESUMECTRL */
- status = TP_ACPI_WANCARD_RESUMECTRL;
+ /* We make sure to keep TP_ACPI_WANCARD_RESUMECTRL off */
if (state == TPACPI_RFK_RADIO_ON)
- status |= TP_ACPI_WANCARD_RADIOSSW;
+ status = TP_ACPI_WANCARD_RADIOSSW;
+ else
+ status = 0;
if (!acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status))
return -EIO;
.read = wan_read,
.write = wan_write,
.exit = wan_exit,
+ .suspend = wan_suspend,
.shutdown = wan_shutdown,
};
/* Models with Intel Extreme Graphics 2 */
TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_NOEC),
- TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
- TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
+ TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC),
+ TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC),
/* Models with Intel GMA900 */
TPACPI_Q_IBM('7', '0', TPACPI_BRGHT_Q_NOEC), /* T43, R52 */
/**
* wmi_get_event_data - Get WMI data associated with an event
*
- * @event: Event to find
- * @out: Buffer to hold event data. out->pointer should be freed with kfree()
+ * @event - Event to find
+ * @out - Buffer to hold event data
*
* Returns extra data associated with an event in WMI.
*/
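For orientation, a minimal sketch of how a notify handler typically consumes this call (the handler name and integer-payload check are illustrative; since the out buffer is ACPI_ALLOCATE_BUFFER, its pointer is caller-owned):

	static void example_wmi_notify(u32 value, void *context)
	{
		struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
		union acpi_object *obj;

		wmi_get_event_data(value, &response);
		obj = (union acpi_object *)response.pointer;
		if (obj && obj->type == ACPI_TYPE_INTEGER)
			printk(KERN_INFO "example: event data 0x%llx\n",
			       (unsigned long long)obj->integer.value);
		kfree(obj);	/* caller frees the ACPI_ALLOCATE_BUFFER result */
	}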
static void print_constraints(struct regulator_dev *rdev)
{
struct regulation_constraints *constraints = rdev->constraints;
- char buf[80] = "";
+ char buf[80];
int count;
if (rdev->desc->type == REGULATOR_VOLTAGE) {
led->isink_init.consumer_supplies = &led->isink_consumer;
led->isink_init.constraints.min_uA = 0;
led->isink_init.constraints.max_uA = pdata->max_uA;
- led->isink_init.constraints.valid_ops_mask
- = REGULATOR_CHANGE_CURRENT | REGULATOR_CHANGE_STATUS;
+ led->isink_init.constraints.valid_ops_mask = REGULATOR_CHANGE_CURRENT;
led->isink_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL;
ret = wm8350_register_regulator(wm8350, isink, &led->isink_init);
if (ret != 0) {
led->dcdc_init.num_consumer_supplies = 1;
led->dcdc_init.consumer_supplies = &led->dcdc_consumer;
led->dcdc_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL;
- led->dcdc_init.constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS;
ret = wm8350_register_regulator(wm8350, dcdc, &led->dcdc_init);
if (ret != 0) {
platform_device_put(pdev);
#define cmos_pnp_resume NULL
#endif
-static void cmos_pnp_shutdown(struct pnp_dev *pnp)
+static void cmos_pnp_shutdown(struct device *pdev)
{
- if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(&pnp->dev))
+ if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(pdev))
return;
cmos_do_shutdown();
.id_table = rtc_ids,
.probe = cmos_pnp_probe,
.remove = __exit_p(cmos_pnp_remove),
- .shutdown = cmos_pnp_shutdown,
/* flag ensures resume() gets called, and stops syslog spam */
.flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
.suspend = cmos_pnp_suspend,
.resume = cmos_pnp_resume,
+ .driver = {
+ .name = (char *)driver_name,
+ .shutdown = cmos_pnp_shutdown,
+ }
};
#endif /* CONFIG_PNP */
}
/* Disabling calibration mode */
- if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_CAL) {
+ if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_CAL)
i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL,
fm3130->regs[FM3130_RTC_CONTROL] &
~(FM3130_RTC_CONTROL_BIT_CAL));
dev_warn(&client->dev, "Disabling calibration mode!\n");
- }
/* Disabling read and write modes */
if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_WRITE ||
- fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_READ) {
+ fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_READ)
i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL,
fm3130->regs[FM3130_RTC_CONTROL] &
~(FM3130_RTC_CONTROL_BIT_READ |
FM3130_RTC_CONTROL_BIT_WRITE));
dev_warn(&client->dev, "Disabling READ or WRITE mode!\n");
- }
/* oscillator off? turn it on, so clock can tick. */
if (fm3130->regs[FM3130_CAL_CONTROL] & FM3130_CAL_CONTROL_BIT_nOSCEN)
return;
cqr = (struct dasd_ccw_req *) intparm;
if (cqr->status != DASD_CQR_IN_IO) {
- DBF_EVENT_DEVID(DBF_DEBUG, cdev,
- "invalid status in handle_killed_request: "
- "%02x", cqr->status);
+ DBF_EVENT(DBF_DEBUG,
+ "invalid status in handle_killed_request: "
+ "bus_id %s, status %02x",
+ dev_name(&cdev->dev), cqr->status);
return;
}
if (device == NULL ||
device != dasd_device_from_cdev_locked(cdev) ||
strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
- DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
- "invalid device in request");
+ DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: "
+ "bus_id %s", dev_name(&cdev->dev));
return;
}
case -EIO:
break;
case -ETIMEDOUT:
- DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
- "request timed out\n", __func__);
+ DBF_EVENT(DBF_WARNING, "%s(%s): request timed out\n",
+ __func__, dev_name(&cdev->dev));
break;
default:
- DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
- "unknown error %ld\n", __func__,
- PTR_ERR(irb));
+ DBF_EVENT(DBF_WARNING, "%s(%s): unknown error %ld\n",
+ __func__, dev_name(&cdev->dev), PTR_ERR(irb));
}
dasd_handle_killed_request(cdev, intparm);
return;
device = (struct dasd_device *) cqr->startdev;
if (!device ||
strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
- DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
- "invalid device in request");
+ DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: "
+ "bus_id %s", dev_name(&cdev->dev));
return;
}
}
ret = dasd_add_sysfs_files(cdev);
if (ret) {
- DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
- "dasd_generic_probe: could not add "
- "sysfs entries");
+ DBF_EVENT(DBF_WARNING,
+ "dasd_generic_probe: could not add sysfs entries "
+ "for %s\n", dev_name(&cdev->dev));
return ret;
}
cdev->handler = &dasd_int_handler;
mdsk_term_io(device);
rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
- if (rc == 4) {
- if (!(device->features & DASD_FEATURE_READONLY)) {
- dev_warn(&device->cdev->dev,
- "The access mode of a DIAG device changed"
- " to read-only");
- device->features |= DASD_FEATURE_READONLY;
- }
- rc = 0;
- }
if (rc)
dev_warn(&device->cdev->dev, "DIAG ERP failed with "
"rc=%d\n", rc);
for (sb = 512; sb < bsize; sb = sb << 1)
block->s2b_shift++;
rc = mdsk_init_io(device, block->bp_block, 0, NULL);
- if (rc && (rc != 4)) {
+ if (rc) {
dev_warn(&device->cdev->dev, "DIAG initialization "
"failed with rc=%d\n", rc);
rc = -EIO;
} else {
- if (rc == 4)
- device->features |= DASD_FEATURE_READONLY;
dev_info(&device->cdev->dev,
- "New DASD with %ld byte/block, total size %ld KB%s\n",
+ "New DASD with %ld byte/block, total size %ld KB\n",
(unsigned long) block->bp_block,
(unsigned long) (block->blocks <<
- block->s2b_shift) >> 1,
- (rc == 4) ? ", read-only device" : "");
- rc = 0;
+ block->s2b_shift) >> 1);
}
out_label:
free_page((long) label);
/* set ECKD specific ccw-device options */
ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE);
if (ret) {
- DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
- "dasd_eckd_probe: could not set "
- "ccw-device options");
+ DBF_EVENT(DBF_WARNING,
+ "dasd_eckd_probe: could not set ccw-device options "
+ "for %s\n", dev_name(&cdev->dev));
return ret;
}
ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
rc = dasd_eckd_read_conf_lpm(device, &conf_data,
&conf_len, lpm);
if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
- DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ DBF_EVENT(DBF_WARNING,
"Read configuration data returned "
- "error %d", rc);
+ "error %d for device: %s", rc,
+ dev_name(&device->cdev->dev));
return rc;
}
if (conf_data == NULL) {
- DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
- "No configuration data "
- "retrieved");
+ DBF_EVENT(DBF_WARNING, "No configuration "
+ "data retrieved for device: %s",
+ dev_name(&device->cdev->dev));
continue; /* no error */
}
/* save first valid configuration data */
sizeof(struct dasd_rssd_features)),
device);
if (IS_ERR(cqr)) {
- DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
- "allocate initialization request");
+ DBF_EVENT(DBF_WARNING, "Could not allocate initialization "
+ "request for device: %s",
+ dev_name(&device->cdev->dev));
return PTR_ERR(cqr);
}
cqr->startdev = device;
/* maybe the requested feature is not available on the server,
 * therefore just report the error and go ahead */
private = (struct dasd_eckd_private *) device->private;
- DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
- "returned rc=%d", private->uid.ssid, rc);
+ DBF_EVENT(DBF_WARNING, "PSF-SSC on storage subsystem %s.%s.%04x "
+ "returned rc=%d for device: %s",
+ private->uid.vendor, private->uid.serial,
+ private->uid.ssid, rc, dev_name(&device->cdev->dev));
/* RE-Read Configuration Data */
return dasd_eckd_read_conf(device);
}
if (private->uid.type == UA_BASE_DEVICE) {
block = dasd_alloc_block();
if (IS_ERR(block)) {
- DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
- "could not allocate dasd "
- "block structure");
+ DBF_EVENT(DBF_WARNING, "could not allocate dasd "
+ "block structure for device: %s",
+ dev_name(&device->cdev->dev));
rc = PTR_ERR(block);
goto out_err1;
}
rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
&private->rdc_data, 64);
if (rc) {
- DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
- "Read device characteristic failed, rc=%d", rc);
+ DBF_EVENT(DBF_WARNING,
+ "Read device characteristics failed, rc=%d for "
+ "device: %s", rc, dev_name(&device->cdev->dev));
goto out_err3;
}
/* find the valid cylinder size */
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n",
req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
- scsw_cc(&irb->scsw), req ? req->intrc : 0);
+ scsw_cc(&irb->scsw), req->intrc);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" device %s: Failing CCW: %p\n",
dev_name(&device->cdev->dev),
rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
&temp_rdc_data, 64);
if (rc) {
- DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
- "Read device characteristic failed, rc=%d", rc);
+ DBF_EVENT(DBF_WARNING,
+ "Read device characteristics failed, rc=%d for "
+ "device: %s", rc, dev_name(&device->cdev->dev));
goto out_err;
}
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
}
block = dasd_alloc_block();
if (IS_ERR(block)) {
- DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", "could not allocate "
- "dasd block structure");
+ DBF_EVENT(DBF_WARNING, "could not allocate dasd block "
+ "structure for device: %s",
+ dev_name(&device->cdev->dev));
device->private = NULL;
kfree(private);
return PTR_ERR(block);
rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC,
&private->rdc_data, 32);
if (rc) {
- DBF_EVENT_DEVID(DBF_WARNING, cdev, "Read device "
- "characteristics returned error %d", rc);
+ DBF_EVENT(DBF_WARNING, "Read device characteristics returned "
+ "error %d for device: %s",
+ rc, dev_name(&device->cdev->dev));
device->block = NULL;
dasd_free_block(block);
device->private = NULL;
d_data); \
} while(0)
-#define DBF_EVENT_DEVID(d_level, d_cdev, d_str, d_data...) \
-do { \
- struct ccw_dev_id __dev_id; \
- ccw_device_get_id(d_cdev, &__dev_id); \
- debug_sprintf_event(dasd_debug_area, \
- d_level, \
- "0.%x.%04x " d_str "\n", \
- __dev_id.ssid, __dev_id.devno, d_data); \
-} while (0)
-
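The removed DBF_EVENT_DEVID() helper prefixed each trace entry with the ccw device id automatically; with plain DBF_EVENT() every call site formats the bus id itself, as the hunks above show. Side by side (cdev and rc are illustrative):

	/* With the helper: "0.<ssid>.<devno>" is prepended for you. */
	DBF_EVENT_DEVID(DBF_WARNING, cdev, "request timed out, rc=%d", rc);

	/* Without it: spell the device out by hand at each call site. */
	DBF_EVENT(DBF_WARNING, "request timed out for %s, rc=%d",
		  dev_name(&cdev->dev), rc);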
#define DBF_EXC(d_level, d_str, d_data...)\
do { \
debug_sprintf_exception(dasd_debug_area, \
struct ccw_dev_id dev_id;
base = block->base;
- if (!base->discipline || !base->discipline->fill_info)
+ if (!base->discipline->fill_info)
return -EINVAL;
dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL);
dasd_info->features |=
((base->features & DASD_FEATURE_READONLY) != 0);
- memcpy(dasd_info->type, base->discipline->name, 4);
+ if (base->discipline)
+ memcpy(dasd_info->type, base->discipline->name, 4);
+ else
+ memcpy(dasd_info->type, "none", 4);
if (block->request_queue->request_fn) {
struct list_head *l;
/* Print device number. */
seq_printf(m, "%s", dev_name(&device->cdev->dev));
/* Print discipline string. */
- if (device->discipline != NULL)
+ if (device != NULL && device->discipline != NULL)
seq_printf(m, "(%s)", device->discipline->name);
else
seq_printf(m, "(none)");
substr = (device->features & DASD_FEATURE_READONLY) ? "(ro)" : " ";
seq_printf(m, "%4s: ", substr);
/* Print device status information. */
- switch (device->state) {
+ switch ((device != NULL) ? device->state : -1) {
+ case -1:
+ seq_printf(m, "unknown");
+ break;
case DASD_STATE_NEW:
seq_printf(m, "new");
break;
sch->private = kzalloc(sizeof(struct io_subchannel_private),
GFP_KERNEL | GFP_DMA);
if (!sch->private)
- goto out_schedule;
+ goto out_err;
/*
* First check if a fitting device may be found amongst the
* disconnected devices or in the orphanage.
}
cdev = io_subchannel_create_ccwdev(sch);
if (IS_ERR(cdev))
- goto out_schedule;
+ goto out_err;
rc = io_subchannel_recog(cdev, sch);
if (rc) {
spin_lock_irqsave(sch->lock, flags);
spin_unlock_irqrestore(sch->lock, flags);
}
return 0;
-
+out_err:
+ kfree(sch->private);
+ sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
out_schedule:
io_subchannel_schedule_removal(sch);
return 0;
cdev = sch_get_cdev(sch);
if (!cdev)
- goto out_free;
+ return 0;
/* Set ccw device to not operational and drop reference. */
spin_lock_irqsave(cdev->ccwlock, flags);
sch_set_cdev(sch, NULL);
cdev->private->state = DEV_STATE_NOT_OPER;
spin_unlock_irqrestore(cdev->ccwlock, flags);
ccw_device_unregister(cdev);
-out_free:
kfree(sch->private);
sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
return 0;
ccw_device_start_id(cdev, 0);
}
-static void ccw_device_disabled_irq(struct ccw_device *cdev,
- enum dev_event dev_event)
+static void
+ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
/*
- * An interrupt in a disabled state means a previous disable was not
+ * An interrupt in state offline means a previous disable was not
* successful - should not happen, but we try to disable again.
*/
cio_disable_subchannel(sch);
{
}
+/*
+ * Bug operation action: log the unexpected state/event pair and BUG().
+ */
+static void
+ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ CIO_MSG_EVENT(0, "Internal state [%i][%i] not handled for device "
+ "0.%x.%04x\n", cdev->private->state, dev_event,
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ BUG();
+}
+
/*
* device statemachine
*/
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_STATE_NOT_OPER] = {
[DEV_EVENT_NOTOPER] = ccw_device_nop,
- [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
+ [DEV_EVENT_INTERRUPT] = ccw_device_bug,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
},
[DEV_STATE_OFFLINE] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
- [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
+ [DEV_EVENT_INTERRUPT] = ccw_device_offline_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_offline_verify,
},
[DEV_STATE_DISCONNECTED] = {
[DEV_EVENT_NOTOPER] = ccw_device_nop,
[DEV_EVENT_INTERRUPT] = ccw_device_start_id,
- [DEV_EVENT_TIMEOUT] = ccw_device_nop,
+ [DEV_EVENT_TIMEOUT] = ccw_device_bug,
[DEV_EVENT_VERIFY] = ccw_device_start_id,
},
[DEV_STATE_DISCONNECTED_SENSE_ID] = {
zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
return -EAGAIN;
}
- if (service_rc == 8 && service_rs == 72)
- return -EINVAL;
zdev->online = 0;
return -EAGAIN; /* repeat the request on a different device. */
}
}
if (service_rc == 12 && service_rs == 769)
return -EINVAL;
- if (service_rc == 8 && service_rs == 72)
- return -EINVAL;
zdev->online = 0;
return -EAGAIN; /* repeat the request on a different device. */
}
if (single_flag) {
if ((skb = skb_dequeue(&conn->commit_queue))) {
atomic_dec(&skb->users);
+ dev_kfree_skb_any(skb);
if (privptr) {
privptr->stats.tx_packets++;
privptr->stats.tx_bytes +=
(skb->len - NETIUCV_HDRLEN
- - NETIUCV_HDRLEN);
+ - NETIUCV_HDRLEN);
}
- dev_kfree_skb_any(skb);
}
}
conn->tx_buff->data = conn->tx_buff->head;
sdev = to_scsi_device(dev);
if (action == BUS_NOTIFY_ADD_DEVICE) {
- err = device_create_file(dev, &scsi_dh_state_attr);
- /* don't care about err */
devinfo = device_handler_match(NULL, sdev);
- if (devinfo)
- err = scsi_dh_handler_attach(sdev, devinfo);
+ if (!devinfo)
+ goto out;
+
+ err = scsi_dh_handler_attach(sdev, devinfo);
+ if (!err)
+ err = device_create_file(dev, &scsi_dh_state_attr);
} else if (action == BUS_NOTIFY_DEL_DEVICE) {
device_remove_file(dev, &scsi_dh_state_attr);
scsi_dh_handler_detach(sdev, NULL);
}
+out:
return err;
}
.change_queue_depth = fc_change_queue_depth,
.change_queue_type = fc_change_queue_type,
.this_id = -1,
- .cmd_per_lun = 3,
+ .cmd_per_lun = 32,
.can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
.use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = SG_ALL,
{
struct fcoe_ctlr *fip = &fcoe->ctlr;
struct netdev_hw_addr *ha;
- struct net_device *real_dev;
u8 flogi_maddr[ETH_ALEN];
fcoe->netdev = netdev;
/* look for SAN MAC address, if multiple SAN MACs exist, only
* use the first one for SPMA */
- real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ?
- vlan_dev_real_dev(netdev) : netdev;
rcu_read_lock();
- for_each_dev_addr(real_dev, ha) {
+ for_each_dev_addr(netdev, ha) {
if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
- (is_valid_ether_addr(ha->addr))) {
+ (is_valid_ether_addr(fip->ctl_src_addr))) {
memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN);
fip->spma = 1;
break;
{
struct net_device *n = fcoe_netdev(lp);
- if (n->netdev_ops->ndo_fcoe_ddp_setup)
+ if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_setup)
return n->netdev_ops->ndo_fcoe_ddp_setup(n, xid, sgl, sgc);
return 0;
{
struct net_device *n = fcoe_netdev(lp);
- if (n->netdev_ops->ndo_fcoe_ddp_done)
+ if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_done)
return n->netdev_ops->ndo_fcoe_ddp_done(n, xid);
return 0;
}
{
struct fcoe_interface *fcoe;
struct net_device *netdev;
- int rc = 0;
+ int rc;
mutex_lock(&fcoe_config_mutex);
#ifdef CONFIG_FCOE_MODULE
EXPORT_SYMBOL(scsi_remove_host);
/**
- * scsi_add_host_with_dma - add a scsi host with dma device
+ * scsi_add_host - add a scsi host
* @shost: scsi host pointer to add
* @dev: a struct device of type scsi class
- * @dma_dev: dma device for the host
- *
- * Note: You rarely need to worry about this unless you're in a
- * virtualised host environments, so use the simpler scsi_add_host()
- * function instead.
*
* Return value:
* 0 on success / != 0 for error
**/
-int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
- struct device *dma_dev)
+int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
{
struct scsi_host_template *sht = shost->hostt;
int error = -EINVAL;
if (!shost->shost_gendev.parent)
shost->shost_gendev.parent = dev ? dev : &platform_bus;
- shost->dma_dev = dma_dev;
error = device_add(&shost->shost_gendev);
if (error)
fail:
return error;
}
-EXPORT_SYMBOL(scsi_add_host_with_dma);
+EXPORT_SYMBOL(scsi_add_host);
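A minimal usage sketch of the registration path as renamed above (the template, private size, and pdev are illustrative):

	shost = scsi_host_alloc(&example_template, sizeof(struct example_priv));
	if (!shost)
		return -ENOMEM;

	error = scsi_add_host(shost, &pdev->dev);
	if (error) {
		scsi_host_put(shost);	/* drop the allocation reference */
		return error;
	}
	scsi_scan_host(shost);		/* kick off target/LUN scanning */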
static void scsi_host_dev_release(struct device *dev)
{
int rc;
ENTER;
- ioa_cfg->pdev->state_saved = true;
rc = pci_restore_state(ioa_cfg->pdev);
if (rc != PCIBIOS_SUCCESSFUL) {
disc, lport->e_d_tov))
return;
err:
- fc_disc_error(disc, NULL);
+ fc_disc_error(disc, fp);
}
/**
did = FC_FID_DIR_SERV;
}
- if (rc) {
- fc_frame_free(fp);
+ if (rc)
return NULL;
- }
fc_fill_fc_hdr(fp, r_ctl, did, fc_host_port_id(lport->host), fh_type,
FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
if (!fsp)
return;
- if (fsp->xfer_ddp == FC_XID_UNKNOWN)
- return;
-
lp = fsp->lp;
- if (lp->tt.ddp_done) {
+ if (fsp->xfer_ddp && lp->tt.ddp_done) {
fsp->xfer_len = lp->tt.ddp_done(lp, fsp->xfer_ddp);
- fsp->xfer_ddp = FC_XID_UNKNOWN;
+ fsp->xfer_ddp = 0;
}
}
tlen -= sg_bytes;
remaining -= sg_bytes;
- if ((skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN) &&
- (tlen))
+ if (tlen)
continue;
/*
seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0);
if (!seq) {
+ fc_frame_free(fp);
rc = -1;
goto unlock;
}
fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */
return;
}
+ fc_frame_free(fp);
retry:
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL,
fsp, jiffies_to_msecs(FC_SCSI_REC_TOV));
- if (!seq)
+ if (!seq) {
+ fc_frame_free(fp);
goto retry;
-
+ }
fsp->recov_seq = seq;
fsp->xfer_len = offset;
fsp->xfer_contig_end = offset;
fsp->cmd = sc_cmd; /* save the cmd */
fsp->lp = lp; /* save the softc ptr */
fsp->rport = rport; /* set the remote port ptr */
- fsp->xfer_ddp = FC_XID_UNKNOWN;
sc_cmd->scsi_done = done;
/*
* scsi status is good but transport level
* underrun.
*/
- sc_cmd->result = (fsp->state & FC_SRB_RCV_STATUS ?
- DID_OK : DID_ERROR) << 16;
+ sc_cmd->result = DID_OK << 16;
} else {
/*
* scsi got underrun, this is an error
int fc_slave_alloc(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ int queue_depth;
if (!rport || fc_remote_port_chkready(rport))
return -ENXIO;
- if (sdev->tagged_supported)
- scsi_activate_tcq(sdev, FC_FCP_DFLT_QUEUE_DEPTH);
- else
- scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev),
- FC_FCP_DFLT_QUEUE_DEPTH);
-
+ if (sdev->tagged_supported) {
+ if (sdev->host->hostt->cmd_per_lun)
+ queue_depth = sdev->host->hostt->cmd_per_lun;
+ else
+ queue_depth = FC_FCP_DFLT_QUEUE_DEPTH;
+ scsi_activate_tcq(sdev, queue_depth);
+ }
return 0;
}
EXPORT_SYMBOL(fc_slave_alloc);
* @sp: current sequence in the RLIR exchange
* @fp: RLIR request frame
*
- * Locking Note: The lport lock is expected to be held before calling
+ * Locking Note: The lport lock is expected to be held before calling
* this function.
*/
static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
* @sp: current sequence in the ECHO exchange
* @fp: ECHO request frame
*
- * Locking Note: The lport lock is expected to be held before calling
+ * Locking Note: The lport lock is expected to be held before calling
* this function.
*/
static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
void *dp;
u32 f_ctl;
- FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
+ FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
fc_lport_state(lport));
len = fr_len(in_fp) - sizeof(struct fc_frame_header);
if (fp) {
dp = fc_frame_payload_get(fp, len);
memcpy(dp, pp, len);
- *((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
+ *((u32 *)dp) = htonl(ELS_LS_ACC << 24);
sp = lport->tt.seq_start_next(sp);
f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
}
/**
- * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
- * @sp: The sequence in the RNID exchange
- * @fp: The RNID request frame
- * @lport: The local port recieving the RNID
+ * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
+ * @lport: Fibre Channel local port receiving the RNID
+ * @sp: current sequence in the RNID exchange
+ * @fp: RNID request frame
*
- * Locking Note: The lport lock is expected to be held before calling
+ * Locking Note: The lport lock is expected to be held before calling
* this function.
*/
static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
* Accept it with the common service parameters indicating our N port.
* Set up to do a PLOGI if we have the higher-number WWPN.
*
- * Locking Note: The lport lock is expected to be held before calling
+ * Locking Note: The lport lock is expected to be held before calling
* this function.
*/
static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
fc_lport_scr_resp, lport, lport->e_d_tov))
- fc_lport_error(lport, NULL);
+ fc_lport_error(lport, fp);
}
/**
if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RPN_ID,
fc_lport_rpn_id_resp,
lport, lport->e_d_tov))
- fc_lport_error(lport, NULL);
+ fc_lport_error(lport, fp);
}
static struct fc_rport_operations fc_lport_rport_ops = {
switch (lport->state) {
case LPORT_ST_DISABLED:
- WARN_ON(1);
- break;
case LPORT_ST_READY:
- WARN_ON(1);
- break;
case LPORT_ST_RESET:
+ WARN_ON(1);
break;
case LPORT_ST_FLOGI:
fc_lport_enter_flogi(lport);
if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
fc_lport_logo_resp, lport, lport->e_d_tov))
- fc_lport_error(lport, NULL);
+ fc_lport_error(lport, fp);
}
/**
if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_FLOGI,
fc_lport_flogi_resp, lport, lport->e_d_tov))
- fc_lport_error(lport, NULL);
+ fc_lport_error(lport, fp);
}
/* Configure a fc_lport */
[RPORT_ST_LOGO] = "LOGO",
[RPORT_ST_ADISC] = "ADISC",
[RPORT_ST_DELETE] = "Delete",
- [RPORT_ST_RESTART] = "Restart",
};
/**
struct fc_rport_priv *rdata;
list_for_each_entry(rdata, &lport->disc.rports, peers)
- if (rdata->ids.port_id == port_id)
+ if (rdata->ids.port_id == port_id &&
+ rdata->rp_state != RPORT_ST_DELETE)
return rdata;
return NULL;
}
struct fc_rport_operations *rport_ops;
struct fc_rport_identifiers ids;
struct fc_rport *rport;
- int restart = 0;
mutex_lock(&rdata->rp_mutex);
event = rdata->event;
mutex_unlock(&rdata->rp_mutex);
if (port_id != FC_FID_DIR_SERV) {
- /*
- * We must drop rp_mutex before taking disc_mutex.
- * Re-evaluate state to allow for restart.
- * A transition to RESTART state must only happen
- * while disc_mutex is held and rdata is on the list.
- */
mutex_lock(&lport->disc.disc_mutex);
- mutex_lock(&rdata->rp_mutex);
- if (rdata->rp_state == RPORT_ST_RESTART)
- restart = 1;
- else
- list_del(&rdata->peers);
- rdata->event = RPORT_EV_NONE;
- mutex_unlock(&rdata->rp_mutex);
+ list_del(&rdata->peers);
mutex_unlock(&lport->disc.disc_mutex);
}
mutex_unlock(&rdata->rp_mutex);
fc_remote_port_delete(rport);
}
- if (restart) {
- mutex_lock(&rdata->rp_mutex);
- FC_RPORT_DBG(rdata, "work restart\n");
- fc_rport_enter_plogi(rdata);
- mutex_unlock(&rdata->rp_mutex);
- } else
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, lport->tt.rport_destroy);
break;
default:
FC_RPORT_DBG(rdata, "ADISC port\n");
fc_rport_enter_adisc(rdata);
break;
- case RPORT_ST_RESTART:
- break;
- case RPORT_ST_DELETE:
- FC_RPORT_DBG(rdata, "Restart deleted port\n");
- fc_rport_state_enter(rdata, RPORT_ST_RESTART);
- break;
default:
FC_RPORT_DBG(rdata, "Login to port\n");
fc_rport_enter_plogi(rdata);
if (rdata->rp_state == RPORT_ST_DELETE) {
FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
+ mutex_unlock(&rdata->rp_mutex);
goto out;
}
- if (rdata->rp_state == RPORT_ST_RESTART)
- FC_RPORT_DBG(rdata, "Port in Restart state, deleting\n");
- else
- fc_rport_enter_logo(rdata);
+ fc_rport_enter_logo(rdata);
/*
* Change the state to Delete so that we discard
* the response.
*/
fc_rport_enter_delete(rdata, RPORT_EV_STOP);
-out:
mutex_unlock(&rdata->rp_mutex);
+
+out:
return 0;
}
case RPORT_ST_READY:
case RPORT_ST_INIT:
case RPORT_ST_DELETE:
- case RPORT_ST_RESTART:
break;
}
fc_rport_enter_logo(rdata);
break;
case RPORT_ST_DELETE:
- case RPORT_ST_RESTART:
case RPORT_ST_READY:
case RPORT_ST_INIT:
break;
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
fc_rport_plogi_resp, rdata, lport->e_d_tov))
- fc_rport_error_retry(rdata, NULL);
+ fc_rport_error_retry(rdata, fp);
else
kref_get(&rdata->kref);
}
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI,
fc_rport_prli_resp, rdata, lport->e_d_tov))
- fc_rport_error_retry(rdata, NULL);
+ fc_rport_error_retry(rdata, fp);
else
kref_get(&rdata->kref);
}
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
fc_rport_rtv_resp, rdata, lport->e_d_tov))
- fc_rport_error_retry(rdata, NULL);
+ fc_rport_error_retry(rdata, fp);
else
kref_get(&rdata->kref);
}
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
fc_rport_logo_resp, rdata, lport->e_d_tov))
- fc_rport_error_retry(rdata, NULL);
+ fc_rport_error_retry(rdata, fp);
else
kref_get(&rdata->kref);
}
}
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC,
fc_rport_adisc_resp, rdata, lport->e_d_tov))
- fc_rport_error_retry(rdata, NULL);
+ fc_rport_error_retry(rdata, fp);
else
kref_get(&rdata->kref);
}
}
break;
case RPORT_ST_PRLI:
- case RPORT_ST_RTV:
case RPORT_ST_READY:
case RPORT_ST_ADISC:
FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d "
/* XXX TBD - should reset */
break;
case RPORT_ST_DELETE:
- case RPORT_ST_LOGO:
- case RPORT_ST_RESTART:
- FC_RPORT_DBG(rdata, "Received PLOGI in state %s - send busy\n",
- fc_rport_state(rdata));
- mutex_unlock(&rdata->rp_mutex);
- rjt_data.reason = ELS_RJT_BUSY;
- rjt_data.explan = ELS_EXPL_NONE;
- goto reject;
+ default:
+ FC_RPORT_DBG(rdata, "Received PLOGI in unexpected state %d\n",
+ rdata->rp_state);
+ fc_frame_free(rx_fp);
+ goto out;
}
/*
break;
case FC_TYPE_FCP:
fcp_parm = ntohl(rspp->spp_params);
- if (fcp_parm & FCP_SPPF_RETRY)
+ if (fcp_parm & FCP_SPPF_RETRY)
rdata->flags |= FC_RP_FLAGS_RETRY;
rdata->supported_classes = FC_COS_CLASS3;
if (fcp_parm & FCP_SPPF_INIT_FCN)
FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
fc_rport_state(rdata));
- fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
-
/*
- * If the remote port was created due to discovery, set state
- * to log back in. It may have seen a stale RSCN about us.
+ * If the remote port was created due to discovery,
+ * log back in. It may have seen a stale RSCN about us.
*/
- if (rdata->disc_id)
- fc_rport_state_enter(rdata, RPORT_ST_RESTART);
+ if (rdata->rp_state != RPORT_ST_DELETE && rdata->disc_id)
+ fc_rport_enter_plogi(rdata);
+ else
+ fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
mutex_unlock(&rdata->rp_mutex);
} else
FC_RPORT_ID_DBG(lport, sid,
vport->els_tmofunc.function = lpfc_els_timeout;
vport->els_tmofunc.data = (unsigned long)vport;
- error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
+ error = scsi_add_host(shost, dev);
if (error)
goto out_put_shost;
pdev = phba->pcidev;
/* Set the device DMA mask size */
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
- || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
- || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
return error;
- }
- }
/* Get the bus address of Bar0 and Bar2 and the number of bytes
* required by each mapping.
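For contrast, the fuller shape shown in the lines removed above also kept the coherent (consistent) DMA mask in step with the streaming mask; roughly (a sketch, with pdev and error as in context):

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		/* 64-bit DMA available */
	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
		   pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
		/* fall back to 32-bit DMA */
	} else {
		return error;	/* no usable DMA configuration */
	}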
pdev = phba->pcidev;
/* Set the device DMA mask size */
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
- || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
- || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
return error;
- }
- }
/* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
* number of bytes required by each mapping. They are actually
int error = 0, i;
void *sense = NULL;
dma_addr_t sense_handle;
- unsigned long *sense_ptr;
+ u32 *sense_ptr;
memset(kbuff_arr, 0, sizeof(kbuff_arr));
}
sense_ptr =
- (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
+ (u32 *) ((unsigned long)cmd->frame + ioc->sense_off);
*sense_ptr = sense_handle;
}
* sense_ptr points to the location that has the user
* sense buffer address
*/
- sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
- ioc->sense_off);
+ sense_ptr = (u32 *) ((unsigned long)ioc->frame.raw +
+ ioc->sense_off);
if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
sense, ioc->sense_len)) {
return retval;
}
-static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUSR,
+static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUGO,
megasas_sysfs_show_poll_mode_io,
megasas_sysfs_set_poll_mode_io);
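For reference, the permission arithmetic behind the two variants (octal values as defined in the kernel's stat headers):

	/* S_IRUGO | S_IWUSR = 0444 | 0200 = 0644  (owner-writable)  */
	/* S_IRUGO | S_IWUGO = 0444 | 0222 = 0666  (world-writable)  */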
#define MPI2_MFGPAGE_DEVID_SAS2108_3 (0x0077)
#define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064)
#define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065)
-#define MPI2_MFGPAGE_DEVID_SAS2208_1 (0x0080)
-#define MPI2_MFGPAGE_DEVID_SAS2208_2 (0x0081)
-#define MPI2_MFGPAGE_DEVID_SAS2208_3 (0x0082)
-#define MPI2_MFGPAGE_DEVID_SAS2208_4 (0x0083)
-#define MPI2_MFGPAGE_DEVID_SAS2208_5 (0x0084)
-#define MPI2_MFGPAGE_DEVID_SAS2208_6 (0x0085)
-#define MPI2_MFGPAGE_DEVID_SAS2208_7 (0x0086)
-#define MPI2_MFGPAGE_DEVID_SAS2208_8 (0x0087)
/* Manufacturing Page 0 */
PCI_ANY_ID, PCI_ANY_ID },
{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
PCI_ANY_ID, PCI_ANY_ID },
- /* Meteor ~ 2116 */
{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
PCI_ANY_ID, PCI_ANY_ID },
{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
PCI_ANY_ID, PCI_ANY_ID },
- /* Thunderbolt ~ 2208 */
- { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
- PCI_ANY_ID, PCI_ANY_ID },
- { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
- PCI_ANY_ID, PCI_ANY_ID },
- { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
- PCI_ANY_ID, PCI_ANY_ID },
- { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
- PCI_ANY_ID, PCI_ANY_ID },
- { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
- PCI_ANY_ID, PCI_ANY_ID },
- { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
- PCI_ANY_ID, PCI_ANY_ID },
- { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_7,
- PCI_ANY_ID, PCI_ANY_ID },
- { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_8,
- PCI_ANY_ID, PCI_ANY_ID },
{0} /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, scsih_pci_table);
fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
}
- if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
- &ha->pdev->dev)) {
+ if (scsi_add_host(vha->host, &fc_vport->dev)) {
DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n",
vha->host_no, vha->vp_idx));
goto vport_create_failed_2;
DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
base_vha->host_no, ha));
+ base_vha->flags.init_done = 1;
+ base_vha->flags.online = 1;
+
ret = scsi_add_host(host, &pdev->dev);
if (ret)
goto probe_failed;
- base_vha->flags.init_done = 1;
- base_vha->flags.online = 1;
-
ha->isp_ops->enable_intrs(ha);
scsi_scan_host(host);
{"Generic", "USB SD Reader", "1.00", BLIST_FORCELUN | BLIST_INQUIRY_36},
{"Generic", "USB Storage-SMC", "0180", BLIST_FORCELUN | BLIST_INQUIRY_36},
{"Generic", "USB Storage-SMC", "0207", BLIST_FORCELUN | BLIST_INQUIRY_36},
- {"HITACHI", "DF400", "*", BLIST_REPORTLUN2},
- {"HITACHI", "DF500", "*", BLIST_REPORTLUN2},
- {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
- {"HITACHI", "OPEN-", "*", BLIST_REPORTLUN2},
+ {"HITACHI", "DF400", "*", BLIST_SPARSELUN},
+ {"HITACHI", "DF500", "*", BLIST_SPARSELUN},
+ {"HITACHI", "DF600", "*", BLIST_SPARSELUN},
+ {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"HITACHI", "OPEN-E", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN},
{"HITACHI", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HITACHI", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HITACHI", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
*/
req->next_rq->resid_len = scsi_in(cmd)->resid;
- scsi_release_buffers(cmd);
blk_end_request_all(req, 0);
+ scsi_release_buffers(cmd);
scsi_next_command(cmd);
return;
}
int nseg = 0;
if (scsi_sg_count(cmd)) {
- struct device *dev = cmd->device->host->dma_dev;
+ struct device *dev = cmd->device->host->shost_gendev.parent;
nseg = dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
cmd->sc_data_direction);
void scsi_dma_unmap(struct scsi_cmnd *cmd)
{
if (scsi_sg_count(cmd)) {
- struct device *dev = cmd->device->host->dma_dev;
+ struct device *dev = cmd->device->host->shost_gendev.parent;
dma_unmap_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
cmd->sc_data_direction);
return error;
error = transport_class_register(&fc_vport_class);
if (error)
- goto unreg_host_class;
+ return error;
error = transport_class_register(&fc_rport_class);
if (error)
- goto unreg_vport_class;
- error = transport_class_register(&fc_transport_class);
- if (error)
- goto unreg_rport_class;
- return 0;
-
-unreg_rport_class:
- transport_class_unregister(&fc_rport_class);
-unreg_vport_class:
- transport_class_unregister(&fc_vport_class);
-unreg_host_class:
- transport_class_unregister(&fc_host_class);
- return error;
+ return error;
+ return transport_class_register(&fc_transport_class);
}
static void __exit fc_transport_exit(void)
struct Scsi_Host *shost = rport_to_shost(rport);
struct fc_internal *i = to_fc_internal(shost->transportt);
unsigned long flags;
- int do_callback = 0;
/*
* if a scan is pending, flush the SCSI Host work_q so that
* Avoid this call if we already called it when we preserved the
* rport for the binding.
*/
- spin_lock_irqsave(shost->host_lock, flags);
if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) &&
- (i->f->dev_loss_tmo_callbk)) {
- rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
- do_callback = 1;
- }
- spin_unlock_irqrestore(shost->host_lock, flags);
-
- if (do_callback)
+ (i->f->dev_loss_tmo_callbk))
i->f->dev_loss_tmo_callbk(rport);
fc_bsg_remove(rport->rqst_q);
struct fc_internal *i = to_fc_internal(shost->transportt);
struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
unsigned long flags;
- int do_callback = 0;
spin_lock_irqsave(shost->host_lock, flags);
rport->roles = FC_PORT_ROLE_UNKNOWN;
rport->port_state = FC_PORTSTATE_NOTPRESENT;
rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
+ rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
/*
* Pre-emptively kill I/O rather than waiting for the work queue
spin_unlock_irqrestore(shost->host_lock, flags);
fc_terminate_rport_io(rport);
- spin_lock_irqsave(shost->host_lock, flags);
-
- if (rport->port_state == FC_PORTSTATE_NOTPRESENT) { /* still missing */
-
- /* remove the identifiers that aren't used in the consisting binding */
- switch (fc_host->tgtid_bind_type) {
- case FC_TGTID_BIND_BY_WWPN:
- rport->node_name = -1;
- rport->port_id = -1;
- break;
- case FC_TGTID_BIND_BY_WWNN:
- rport->port_name = -1;
- rport->port_id = -1;
- break;
- case FC_TGTID_BIND_BY_ID:
- rport->node_name = -1;
- rport->port_name = -1;
- break;
- case FC_TGTID_BIND_NONE: /* to keep compiler happy */
- break;
- }
-
- /*
- * As this only occurs if the remote port (scsi target)
- * went away and didn't come back - we'll remove
- * all attached scsi devices.
- */
- rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
- fc_queue_work(shost, &rport->stgt_delete_work);
+ BUG_ON(rport->port_state != FC_PORTSTATE_NOTPRESENT);
- do_callback = 1;
+ /* remove the identifiers that aren't used in the consistent binding */
+ switch (fc_host->tgtid_bind_type) {
+ case FC_TGTID_BIND_BY_WWPN:
+ rport->node_name = -1;
+ rport->port_id = -1;
+ break;
+ case FC_TGTID_BIND_BY_WWNN:
+ rport->port_name = -1;
+ rport->port_id = -1;
+ break;
+ case FC_TGTID_BIND_BY_ID:
+ rport->node_name = -1;
+ rport->port_name = -1;
+ break;
+ case FC_TGTID_BIND_NONE: /* to keep compiler happy */
+ break;
}
- spin_unlock_irqrestore(shost->host_lock, flags);
+ /*
+ * As this only occurs if the remote port (scsi target)
+ * went away and didn't come back - we'll remove
+ * all attached scsi devices.
+ */
+ fc_queue_work(shost, &rport->stgt_delete_work);
/*
* Notify the driver that the rport is now dead. The LLDD will
*
* Note: we set the CALLBK_DONE flag above to correspond
*/
- if (do_callback && i->f->dev_loss_tmo_callbk)
+ if (i->f->dev_loss_tmo_callbk)
i->f->dev_loss_tmo_callbk(rport);
}
spin_unlock_irqrestore(&session->lock, flags);
scsi_target_block(&session->dev);
ISCSI_DBG_TRANS_SESSION(session, "Completed SCSI target blocking\n");
- if (session->recovery_tmo >= 0)
- queue_delayed_work(iscsi_eh_timer_workq,
- &session->recovery_work,
- session->recovery_tmo * HZ);
+ queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work,
+ session->recovery_tmo * HZ);
}
void iscsi_block_session(struct iscsi_cls_session *session)
switch (ev->u.set_param.param) {
case ISCSI_PARAM_SESS_RECOVERY_TMO:
sscanf(data, "%d", &value);
- session->recovery_tmo = value;
+ if (value != 0)
+ session->recovery_tmo = value;
break;
default:
err = transport->set_param(conn, ev->u.set_param.param,
SRpnt->waiting = waiting;
if (STp->buffer->do_dio) {
- mdata->page_order = 0;
mdata->nr_entries = STp->buffer->sg_segs;
mdata->pages = STp->buffer->mapped_pages;
} else {
- mdata->page_order = STp->buffer->reserved_page_order;
mdata->nr_entries =
DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order);
- mdata->pages = STp->buffer->reserved_pages;
- mdata->offset = 0;
+ STp->buffer->map_data.pages = STp->buffer->reserved_pages;
+ STp->buffer->map_data.offset = 0;
}
memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd));
priority |= __GFP_ZERO;
if (STbuffer->frp_segs) {
- order = STbuffer->reserved_page_order;
+ order = STbuffer->map_data.page_order;
b_size = PAGE_SIZE << order;
} else {
for (b_size = PAGE_SIZE, order = 0;
segs++;
}
STbuffer->b_data = page_address(STbuffer->reserved_pages[0]);
- STbuffer->reserved_page_order = order;
+ STbuffer->map_data.page_order = order;
return 1;
}
for (i=0; i < st_bp->frp_segs; i++)
memset(page_address(st_bp->reserved_pages[i]), 0,
- PAGE_SIZE << st_bp->reserved_page_order);
+ PAGE_SIZE << st_bp->map_data.page_order);
st_bp->cleared = 1;
}
/* Release the extra buffer */
static void normalize_buffer(struct st_buffer * STbuffer)
{
- int i, order = STbuffer->reserved_page_order;
+ int i, order = STbuffer->map_data.page_order;
for (i = 0; i < STbuffer->frp_segs; i++) {
__free_pages(STbuffer->reserved_pages[i], order);
}
STbuffer->frp_segs = 0;
STbuffer->sg_segs = 0;
- STbuffer->reserved_page_order = 0;
+ STbuffer->map_data.page_order = 0;
STbuffer->map_data.offset = 0;
}
static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count)
{
int i, cnt, res, offset;
- int length = PAGE_SIZE << st_bp->reserved_page_order;
+ int length = PAGE_SIZE << st_bp->map_data.page_order;
for (i = 0, offset = st_bp->buffer_bytes;
i < st_bp->frp_segs && offset >= length; i++)
static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count)
{
int i, cnt, res, offset;
- int length = PAGE_SIZE << st_bp->reserved_page_order;
+ int length = PAGE_SIZE << st_bp->map_data.page_order;
for (i = 0, offset = st_bp->read_pointer;
i < st_bp->frp_segs && offset >= length; i++)
{
int src_seg, dst_seg, src_offset = 0, dst_offset;
int count, total;
- int length = PAGE_SIZE << st_bp->reserved_page_order;
+ int length = PAGE_SIZE << st_bp->map_data.page_order;
if (offset == 0)
return;
}
mdata->offset = uaddr & ~PAGE_MASK;
+ mdata->page_order = 0;
STbp->mapped_pages = pages;
return nr_pages;
struct st_request *last_SRpnt;
struct st_cmdstatus cmdstat;
struct page **reserved_pages;
- int reserved_page_order;
struct page **mapped_pages;
struct rq_map_data map_data;
unsigned char *b_data;
#define PASS_LIMIT 256
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
-
/*
* We default to IRQ0 for the "no irq" hack. Some
* machine types want others as well - they're free
serial_out(up, UART_IER, up->ier);
if (up->bugs & UART_BUG_TXEN) {
- unsigned char lsr;
+ unsigned char lsr, iir;
lsr = serial_in(up, UART_LSR);
up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+ iir = serial_in(up, UART_IIR) & 0x0f;
if ((up->port.type == PORT_RM9000) ?
- (lsr & UART_LSR_THRE) :
- (lsr & UART_LSR_TEMT))
+ (lsr & UART_LSR_THRE &&
+ (iir == UART_IIR_NO_INT || iir == UART_IIR_THRI)) :
+ (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT))
transmit_chars(up);
}
}
up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
spin_unlock_irqrestore(&up->port.lock, flags);
- return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0;
+ return lsr & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
}
static unsigned int serial8250_get_mctrl(struct uart_port *port)
spin_unlock_irqrestore(&up->port.lock, flags);
}
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
/*
* Wait for transmitter & holding register to empty
*/
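A minimal sketch of the busy-wait this comment introduces (the timeout bound is illustrative; serial_in() and up as in the hunks above):

	unsigned int tmout = 10000;

	/* Spin until the LSR reports both TEMT and THRE set. */
	while ((serial_in(up, UART_LSR) & BOTH_EMPTY) != BOTH_EMPTY) {
		if (--tmout == 0)
			break;
		udelay(1);
	}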
/* U.S. Robotics 56K Voice INT PnP*/
{ "USR9190", 0 },
/* Wacom tablets */
- { "WACFXXX", 0 },
+ { "WACF004", 0 },
+ { "WACF005", 0 },
+ { "WACF006", 0 },
+ { "WACF007", 0 },
+ { "WACF008", 0 },
+ { "WACF009", 0 },
+ { "WACF00A", 0 },
+ { "WACF00B", 0 },
+ { "WACF00C", 0 },
/* Compaq touchscreen */
{ "FPI2002", 0 },
/* Fujitsu Stylistic touchscreens */
{ "FUJ02E5", 0 },
/* Fujitsu P-series tablet PC device */
{ "FUJ02E6", 0 },
- /* Fujitsu Wacom 2FGT Tablet PC device */
- { "FUJ02E7", 0 },
/*
* LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in
* disguise)
spin_unlock_irqrestore(&port->lock, flags);
}
-static int __devinit ulite_console_setup(struct console *co, char *options)
+static int __init ulite_console_setup(struct console *co, char *options)
{
struct uart_port *port;
int baud = 9600;
#include "ssb_private.h"
-#include <linux/ctype.h>
-
static const struct ssb_sprom *fallback_sprom;
static int hex2sprom(u16 *sprom, const char *dump, size_t len,
size_t sprom_size_words)
{
- char c, tmp[5] = { 0 };
- int err, cnt = 0;
+ char tmp[5] = { 0 };
+ int cnt = 0;
unsigned long parsed;
- /* Strip whitespace at the end. */
- while (len) {
- c = dump[len - 1];
- if (!isspace(c) && c != '\0')
- break;
- len--;
- }
- /* Length must match exactly. */
- if (len != sprom_size_words * 4)
+ if (len < sprom_size_words * 2)
return -EINVAL;
while (cnt < sprom_size_words) {
memcpy(tmp, dump, 4);
dump += 4;
- err = strict_strtoul(tmp, 16, &parsed);
- if (err)
- return err;
+ parsed = simple_strtoul(tmp, NULL, 16);
sprom[cnt++] = swab16((u16)parsed);
}
{
struct usb_interface *intf = to_usb_interface(dev);
struct asus_oled_dev *odev = usb_get_intfdata(intf);
- unsigned long value;
- if (strict_strtoul(buf, 10, &value))
- return -EINVAL;
+ int temp = strict_strtoul(buf, 10, NULL);
- enable_oled(odev, value);
+ enable_oled(odev, temp);
return count;
}
{
struct asus_oled_dev *odev =
(struct asus_oled_dev *) dev_get_drvdata(device);
- unsigned long value;
- if (strict_strtoul(buf, 10, &value))
- return -EINVAL;
+ int temp = strict_strtoul(buf, 10, NULL);
- enable_oled(odev, value);
+ enable_oled(odev, temp);
return count;
}
* retrieve the initialized message and event pages. Otherwise, we create and
* initialize the message and event pages.
*/
-void HvSynicInit(void *irqarg)
+int HvSynicInit(u32 irqVector)
{
u64 version;
union hv_synic_simp simp;
union hv_synic_sint sharedSint;
union hv_synic_scontrol sctrl;
u64 guestID;
- u32 irqVector = *((u32 *)(irqarg));
- int cpu = smp_processor_id();
+ int ret = 0;
DPRINT_ENTER(VMBUS);
if (!gHvContext.HypercallPage) {
DPRINT_EXIT(VMBUS);
- return;
+ return ret;
}
/* Check the version */
*/
rdmsrl(HV_X64_MSR_GUEST_OS_ID, guestID);
if (guestID == HV_LINUX_GUEST_ID) {
- gHvContext.synICMessagePage[cpu] =
+ gHvContext.synICMessagePage[0] =
phys_to_virt(simp.BaseSimpGpa << PAGE_SHIFT);
- gHvContext.synICEventPage[cpu] =
+ gHvContext.synICEventPage[0] =
phys_to_virt(siefp.BaseSiefpGpa << PAGE_SHIFT);
} else {
DPRINT_ERR(VMBUS, "unknown guest id!!");
goto Cleanup;
}
DPRINT_DBG(VMBUS, "MAPPED: Simp: %p, Siefp: %p",
- gHvContext.synICMessagePage[cpu],
- gHvContext.synICEventPage[cpu]);
+ gHvContext.synICMessagePage[0],
+ gHvContext.synICEventPage[0]);
} else {
- gHvContext.synICMessagePage[cpu] = (void *)get_zeroed_page(GFP_ATOMIC);
- if (gHvContext.synICMessagePage[cpu] == NULL) {
+ gHvContext.synICMessagePage[0] = osd_PageAlloc(1);
+ if (gHvContext.synICMessagePage[0] == NULL) {
DPRINT_ERR(VMBUS,
"unable to allocate SYNIC message page!!");
goto Cleanup;
}
- gHvContext.synICEventPage[cpu] = (void *)get_zeroed_page(GFP_ATOMIC);
- if (gHvContext.synICEventPage[cpu] == NULL) {
+ gHvContext.synICEventPage[0] = osd_PageAlloc(1);
+ if (gHvContext.synICEventPage[0] == NULL) {
DPRINT_ERR(VMBUS,
"unable to allocate SYNIC event page!!");
goto Cleanup;
/* Setup the Synic's message page */
rdmsrl(HV_X64_MSR_SIMP, simp.AsUINT64);
simp.SimpEnabled = 1;
- simp.BaseSimpGpa = virt_to_phys(gHvContext.synICMessagePage[cpu])
+ simp.BaseSimpGpa = virt_to_phys(gHvContext.synICMessagePage[0])
>> PAGE_SHIFT;
DPRINT_DBG(VMBUS, "HV_X64_MSR_SIMP msr set to: %llx",
/* Setup the Synic's event page */
rdmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64);
siefp.SiefpEnabled = 1;
- siefp.BaseSiefpGpa = virt_to_phys(gHvContext.synICEventPage[cpu])
+ siefp.BaseSiefpGpa = virt_to_phys(gHvContext.synICEventPage[0])
>> PAGE_SHIFT;
DPRINT_DBG(VMBUS, "HV_X64_MSR_SIEFP msr set to: %llx",
DPRINT_EXIT(VMBUS);
- return;
+ return ret;
Cleanup:
+ ret = -1;
+
if (gHvContext.GuestId == HV_LINUX_GUEST_ID) {
- if (gHvContext.synICEventPage[cpu])
- osd_PageFree(gHvContext.synICEventPage[cpu], 1);
+ if (gHvContext.synICEventPage[0])
+ osd_PageFree(gHvContext.synICEventPage[0], 1);
- if (gHvContext.synICMessagePage[cpu])
- osd_PageFree(gHvContext.synICMessagePage[cpu], 1);
+ if (gHvContext.synICMessagePage[0])
+ osd_PageFree(gHvContext.synICMessagePage[0], 1);
}
DPRINT_EXIT(VMBUS);
- return;
+
+ return ret;
}
/**
* HvSynicCleanup - Cleanup routine for HvSynicInit().
*/
-void HvSynicCleanup(void *arg)
+void HvSynicCleanup(void)
{
union hv_synic_sint sharedSint;
union hv_synic_simp simp;
union hv_synic_siefp siefp;
- int cpu = smp_processor_id();
DPRINT_ENTER(VMBUS);
sharedSint.Masked = 1;
- /* Need to correctly cleanup in the case of SMP!!! */
/* Disable the interrupt */
wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, sharedSint.AsUINT64);
wrmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64);
- osd_PageFree(gHvContext.synICMessagePage[cpu], 1);
- osd_PageFree(gHvContext.synICEventPage[cpu], 1);
+ osd_PageFree(gHvContext.synICMessagePage[0], 1);
+ osd_PageFree(gHvContext.synICEventPage[0], 1);
}
DPRINT_EXIT(VMBUS);
},
};
-#define MAX_NUM_CPUS 32
+#define MAX_NUM_CPUS 1
struct hv_input_signal_event_buffer {
extern u16 HvSignalEvent(void);
-extern void HvSynicInit(void *irqarg);
+extern int HvSynicInit(u32 irqVector);
-extern void HvSynicCleanup(void *arg);
+extern void HvSynicCleanup(void);
#endif /* __HV_H__ */
/* strcpy(dev->name, "vmbus"); */
/* SynIC setup... */
- on_each_cpu(HvSynicInit, (void *)irqvector, 1);
+ ret = HvSynicInit(*irqvector);
/* Connect to VMBus in the root partition */
ret = VmbusConnect();
DPRINT_ENTER(VMBUS);
VmbusChannelReleaseUnattachedChannels();
VmbusDisconnect();
- on_each_cpu(HvSynicCleanup, NULL, 1);
+ HvSynicCleanup();
DPRINT_EXIT(VMBUS);
return ret;
*/
static void VmbusOnMsgDPC(struct hv_driver *drv)
{
- int cpu = smp_processor_id();
- void *page_addr = gHvContext.synICMessagePage[cpu];
+ void *page_addr = gHvContext.synICMessagePage[0];
struct hv_message *msg = (struct hv_message *)page_addr +
VMBUS_MESSAGE_SINT;
struct hv_message *copied;
static int VmbusOnISR(struct hv_driver *drv)
{
int ret = 0;
- int cpu = smp_processor_id();
void *page_addr;
struct hv_message *msg;
union hv_synic_event_flags *event;
- page_addr = gHvContext.synICMessagePage[cpu];
+ page_addr = gHvContext.synICMessagePage[0];
msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
DPRINT_ENTER(VMBUS);
}
/* TODO: Check if there are events to be process */
- page_addr = gHvContext.synICEventPage[cpu];
+ page_addr = gHvContext.synICEventPage[0];
event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT;
/* Since we are a child, we only need to check bit 0 */
struct sk_buff *frag,
int hdr_len);
-extern int ieee80211_rtl_xmit(struct sk_buff *skb,
+extern int ieee80211_xmit(struct sk_buff *skb,
struct net_device *dev);
extern void ieee80211_txb_free(struct ieee80211_txb *);
/* ieee80211_rx.c */
-extern int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
+extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
struct ieee80211_rx_stats *rx_stats);
extern void ieee80211_rx_mgt(struct ieee80211_device *ieee,
struct ieee80211_hdr_4addr *header,
extern void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee);
extern void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee);
extern void ieee80211_reset_queue(struct ieee80211_device *ieee);
-extern void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee);
-extern void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee);
+extern void ieee80211_wake_queue(struct ieee80211_device *ieee);
+extern void ieee80211_stop_queue(struct ieee80211_device *ieee);
extern struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee);
extern void ieee80211_start_send_beacons(struct ieee80211_device *ieee);
extern void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
extern void notify_wx_assoc_event(struct ieee80211_device *ieee);
extern void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success);
extern void SendDisassociation(struct ieee80211_device *ieee,u8* asSta,u8 asRsn);
-extern void ieee80211_rtl_start_scan(struct ieee80211_device *ieee);
+extern void ieee80211_start_scan(struct ieee80211_device *ieee);
//Add for RF power on power off by lizhaoming 080512
extern void SendDisassociation(struct ieee80211_device *ieee,
/* All received frames are sent to this function. @skb contains the frame in
* IEEE 802.11 format, i.e., in the format it was sent over air.
* This function is called only as a tasklet (software IRQ). */
-int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
+int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
struct ieee80211_rx_stats *rx_stats)
{
struct net_device *dev = ieee->dev;
}
/* called with ieee->lock held */
-void ieee80211_rtl_start_scan(struct ieee80211_device *ieee)
+void ieee80211_start_scan(struct ieee80211_device *ieee)
{
if(IS_DOT11D_ENABLE(ieee) )
{
}
}
-void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen)
+void ieee80211_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen)
{
u8 *c;
struct sk_buff *skb;
ieee80211_associate_step2(ieee);
}else{
- ieee80211_rtl_auth_challenge(ieee, challenge, chlen);
+ ieee80211_auth_challenge(ieee, challenge, chlen);
}
}else{
ieee->softmac_stats.rx_auth_rs_err++;
}
-void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee)
+void ieee80211_wake_queue(struct ieee80211_device *ieee)
{
unsigned long flags;
}
-void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee)
+void ieee80211_stop_queue(struct ieee80211_device *ieee)
{
//unsigned long flags;
//spin_lock_irqsave(&ieee->lock,flags);
//#else
if (ieee->state == IEEE80211_NOLINK){
ieee->actscanning = true;
- ieee80211_rtl_start_scan(ieee);
+ ieee80211_start_scan(ieee);
}
//#endif
spin_unlock_irqrestore(&ieee->lock, flags);
if(ieee->state == IEEE80211_NOLINK){
ieee->beinretry = false;
ieee->actscanning = true;
- ieee80211_rtl_start_scan(ieee);
+ ieee80211_start_scan(ieee);
}
//YJ,add,080828, notify os here
if(ieee->state == IEEE80211_NOLINK)
}
/* SKBs are added to the ieee->tx_queue. */
-int ieee80211_rtl_xmit(struct sk_buff *skb,
+int ieee80211_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct ieee80211_device *ieee = netdev_priv(dev);
if(priv->rx_skb->len > 4)
skb_trim(priv->rx_skb,priv->rx_skb->len-4);
#ifndef RX_DONT_PASS_UL
- if(!ieee80211_rtl_rx(priv->ieee80211,
+ if(!ieee80211_rx(priv->ieee80211,
priv->rx_skb, &stats)){
#endif // RX_DONT_PASS_UL
if (!check_nic_enought_desc(dev, priority)){
DMESGW("Error: no descriptor left by previous TX (avail %d) ",
get_curr_tx_free_desc(dev, priority));
- ieee80211_rtl_stop_queue(priv->ieee80211);
+ ieee80211_stop_queue(priv->ieee80211);
}
rtl8180_tx(dev, skb->data, skb->len, priority, morefrag,0,rate);
if (!check_nic_enought_desc(dev, priority))
- ieee80211_rtl_stop_queue(priv->ieee80211);
+ ieee80211_stop_queue(priv->ieee80211);
spin_unlock_irqrestore(&priv->tx_lock,flags);
}
.ndo_set_mac_address = r8180_set_mac_adr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
- .ndo_start_xmit = ieee80211_rtl_xmit,
+ .ndo_start_xmit = ieee80211_xmit,
};
static int __devinit rtl8180_pci_probe(struct pci_dev *pdev,
spin_unlock_irqrestore(&priv->tx_lock,flags);
if(enough_desc)
- ieee80211_rtl_wake_queue(priv->ieee80211);
+ ieee80211_wake_queue(priv->ieee80211);
}
void rtl8180_tx_isr(struct net_device *dev, int pri,short error)
// queue_work(priv->ieee80211->wq, &priv->ieee80211->wx_sync_scan_wq);
//printk("start scan============================>\n");
ieee80211_softmac_ips_scan_syncro(priv->ieee80211);
-//ieee80211_rtl_start_scan(priv->ieee80211);
+//ieee80211_start_scan(priv->ieee80211);
/* intentionally forget to up sem */
// up(&priv->ieee80211->wx_sem);
ret = 0;
n_bytes = roundup(12 + this_part, 4);
memset(buffer + 12 + this_part, 0, n_bytes - (12 + this_part));
- do {
- retval = usb_bulk_msg(data->usb_dev,
- usb_sndbulkpipe(data->usb_dev,
- data->bulk_out),
- buffer, n_bytes,
- &actual, USBTMC_TIMEOUT);
- if (retval != 0)
- break;
- n_bytes -= actual;
- } while (n_bytes);
+ retval = usb_bulk_msg(data->usb_dev,
+ usb_sndbulkpipe(data->usb_dev,
+ data->bulk_out),
+ buffer, n_bytes, &actual, USBTMC_TIMEOUT);
data->bTag_last_write = data->bTag;
data->bTag++;
return 0;
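[Editor's note: the do/while being dropped above is the classic "write-all" pattern -- resubmit until the byte count reaches zero, because a bulk transfer may complete short. A minimal user-space analogue of that pattern, using POSIX write() where the kernel code uses usb_bulk_msg(); this is a sketch of the idiom, not the driver's code:]

    #include <unistd.h>
    #include <errno.h>
    #include <stddef.h>

    /* Write the whole buffer, retrying on short writes.
     * Returns 0 on success, -1 on error (errno set). */
    static int write_all(int fd, const char *buf, size_t n_bytes)
    {
        while (n_bytes) {
            ssize_t actual = write(fd, buf, n_bytes);
            if (actual < 0) {
                if (errno == EINTR)
                    continue;   /* interrupted: retry */
                return -1;      /* real error: stop */
            }
            buf += actual;      /* advance past what was sent */
            n_bytes -= actual;
        }
        return 0;
    }

Callers use write_all(fd, buf, len) wherever a bare write() could legally send fewer bytes than asked.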
/* allocate 2^1 pages = 8K (on i386);
* should be more than enough for one device */
- pages_start = (char *)__get_free_pages(GFP_NOIO, 1);
+ pages_start = (char *)__get_free_pages(GFP_KERNEL, 1);
if (!pages_start)
return -ENOMEM;
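[Editor's note: as the comment above says, the second argument to __get_free_pages() is an order, i.e. a power of two: order 1 means 2^1 = 2 contiguous pages, which is 8 KiB with 4 KiB pages. A one-line sanity check of that arithmetic, with PAGE_SIZE of 4096 assumed for illustration:]

    #include <stdio.h>

    int main(void)
    {
        unsigned long page_size = 4096; /* typical i386/x86-64 page */
        unsigned int order = 1;         /* same order as the call above */

        /* __get_free_pages(gfp, order) returns 2^order contiguous pages */
        printf("%lu bytes\n", page_size << order);  /* prints 8192 */
        return 0;
    }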
void __user *addr = as->userurb;
unsigned int i;
- if (as->userbuffer && urb->actual_length)
+ if (as->userbuffer)
if (copy_to_user(as->userbuffer, urb->transfer_buffer,
- urb->actual_length))
+ urb->transfer_buffer_length))
goto err_out;
if (put_user(as->status, &userurb->status))
goto err_out;
}
}
+ free_async(as);
+
if (put_user(addr, (void __user * __user *)arg))
return -EFAULT;
return 0;
err_out:
+ free_async(as);
return -EFAULT;
}
static int proc_reapurb(struct dev_state *ps, void __user *arg)
{
struct async *as = reap_as(ps);
- if (as) {
- int retval = processcompl(as, (void __user * __user *)arg);
- free_async(as);
- return retval;
- }
+ if (as)
+ return processcompl(as, (void __user * __user *)arg);
if (signal_pending(current))
return -EINTR;
return -EIO;
static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg)
{
- int retval;
struct async *as;
- as = async_getcompleted(ps);
- retval = -EAGAIN;
- if (as) {
- retval = processcompl(as, (void __user * __user *)arg);
- free_async(as);
- }
- return retval;
+ if (!(as = async_getcompleted(ps)))
+ return -EAGAIN;
+ return processcompl(as, (void __user * __user *)arg);
}
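[Editor's note: both sides of this reap-path change hinge on one ownership rule for the completed async: exactly one function must free it on every path. A minimal sketch of the two conventions in plain C -- names here are hypothetical, not usbfs symbols:]

    #include <stdio.h>
    #include <stdlib.h>

    struct async { int status; };

    /* Convention A: the callee consumes the object on all paths. */
    static int processcompl_consumes(struct async *as)
    {
        int ret = as->status;
        free(as);               /* freed here, success or failure */
        return ret;
    }

    /* Convention B: the callee only reads; the caller frees. */
    static int processcompl_reads(const struct async *as)
    {
        return as->status;
    }

    static int reap_consuming(struct async *as)
    {
        return processcompl_consumes(as);   /* no free in the caller */
    }

    static int reap_nonconsuming(struct async *as)
    {
        int ret = processcompl_reads(as);
        free(as);                           /* caller owns the free */
        return ret;
    }

    int main(void)
    {
        struct async *a = malloc(sizeof(*a));
        struct async *b = malloc(sizeof(*b));

        a->status = 1;
        b->status = 2;
        printf("%d %d\n", reap_consuming(a), reap_nonconsuming(b));
        return 0;
    }

Mixing the two conventions is what produces double-frees or leaks; the hunks above move the free_async() call so that only one convention is in effect.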
#ifdef CONFIG_COMPAT
void __user *addr = as->userurb;
unsigned int i;
- if (as->userbuffer && urb->actual_length)
+ if (as->userbuffer)
if (copy_to_user(as->userbuffer, urb->transfer_buffer,
- urb->actual_length))
+ urb->transfer_buffer_length))
return -EFAULT;
if (put_user(as->status, &userurb->status))
return -EFAULT;
}
}
+ free_async(as);
if (put_user(ptr_to_compat(addr), (u32 __user *)arg))
return -EFAULT;
return 0;
static int proc_reapurb_compat(struct dev_state *ps, void __user *arg)
{
struct async *as = reap_as(ps);
- if (as) {
- int retval = processcompl_compat(as, (void __user * __user *)arg);
- free_async(as);
- return retval;
- }
+ if (as)
+ return processcompl_compat(as, (void __user * __user *)arg);
if (signal_pending(current))
return -EINTR;
return -EIO;
static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg)
{
- int retval;
struct async *as;
- retval = -EAGAIN;
- as = async_getcompleted(ps);
- if (as) {
- retval = processcompl_compat(as, (void __user * __user *)arg);
- free_async(as);
- }
- return retval;
+ if (!(as = async_getcompleted(ps)))
+ return -EAGAIN;
+ return processcompl_compat(as, (void __user * __user *)arg);
}
#endif
#endif
/**
- * usb_enumerate_device_otg - FIXME (usbcore-internal)
+ * usb_configure_device_otg - FIXME (usbcore-internal)
* @udev: newly addressed device (in ADDRESS state)
*
- * Finish enumeration for On-The-Go devices
+ * Do configuration for On-The-Go devices
*/
-static int usb_enumerate_device_otg(struct usb_device *udev)
+static int usb_configure_device_otg(struct usb_device *udev)
{
int err = 0;
/**
- * usb_enumerate_device - Read device configs/intfs/otg (usbcore-internal)
+ * usb_configure_device - Detect and probe device intfs/otg (usbcore-internal)
* @udev: newly addressed device (in ADDRESS state)
*
* This is only called by usb_new_device() and usb_authorize_device()
* the string descriptors, as they will be errored out by the device
* until it has been authorized.
*/
-static int usb_enumerate_device(struct usb_device *udev)
+static int usb_configure_device(struct usb_device *udev)
{
int err;
udev->descriptor.iManufacturer);
udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
}
- err = usb_enumerate_device_otg(udev);
+ err = usb_configure_device_otg(udev);
fail:
return err;
}
* usb_new_device - perform initial device setup (usbcore-internal)
* @udev: newly addressed device (in ADDRESS state)
*
- * This is called with devices which have been detected but not fully
- * enumerated. The device descriptor is available, but not descriptors
+ * This is called with devices which have been enumerated, but not yet
+ * configured. The device descriptor is available, but not descriptors
* for any device configuration. The caller must have locked either
* the parent hub (if udev is a normal device) or else the
* usb_bus_list_lock (if udev is a root hub). The parent's pointer to
if (udev->parent)
usb_autoresume_device(udev->parent);
- usb_detect_quirks(udev);
- err = usb_enumerate_device(udev); /* Read descriptors */
+ usb_detect_quirks(udev); /* Determine quirks */
+ err = usb_configure_device(udev); /* detect & probe dev/intfs */
if (err < 0)
goto fail;
dev_dbg(&udev->dev, "udev %d, busnum %d, minor = %d\n",
*/
int usb_deauthorize_device(struct usb_device *usb_dev)
{
+ unsigned cnt;
usb_lock_device(usb_dev);
if (usb_dev->authorized == 0)
goto out_unauthorized;
-
usb_dev->authorized = 0;
usb_set_configuration(usb_dev, -1);
-
- kfree(usb_dev->product);
usb_dev->product = kstrdup("n/a (unauthorized)", GFP_KERNEL);
- kfree(usb_dev->manufacturer);
usb_dev->manufacturer = kstrdup("n/a (unauthorized)", GFP_KERNEL);
- kfree(usb_dev->serial);
usb_dev->serial = kstrdup("n/a (unauthorized)", GFP_KERNEL);
-
- usb_destroy_configuration(usb_dev);
+ kfree(usb_dev->config);
+ usb_dev->config = NULL;
+ for (cnt = 0; cnt < usb_dev->descriptor.bNumConfigurations; cnt++)
+ kfree(usb_dev->rawdescriptors[cnt]);
usb_dev->descriptor.bNumConfigurations = 0;
-
+ kfree(usb_dev->rawdescriptors);
out_unauthorized:
usb_unlock_device(usb_dev);
return 0;
int usb_authorize_device(struct usb_device *usb_dev)
{
int result = 0, c;
-
usb_lock_device(usb_dev);
if (usb_dev->authorized == 1)
goto out_authorized;
-
+ kfree(usb_dev->product);
+ usb_dev->product = NULL;
+ kfree(usb_dev->manufacturer);
+ usb_dev->manufacturer = NULL;
+ kfree(usb_dev->serial);
+ usb_dev->serial = NULL;
result = usb_autoresume_device(usb_dev);
if (result < 0) {
dev_err(&usb_dev->dev,
"authorization: %d\n", result);
goto error_device_descriptor;
}
-
- kfree(usb_dev->product);
- usb_dev->product = NULL;
- kfree(usb_dev->manufacturer);
- usb_dev->manufacturer = NULL;
- kfree(usb_dev->serial);
- usb_dev->serial = NULL;
-
usb_dev->authorized = 1;
- result = usb_enumerate_device(usb_dev);
+ result = usb_configure_device(usb_dev);
if (result < 0)
- goto error_enumerate;
+ goto error_configure;
/* Choose and set the configuration. This registers the interfaces
* with the driver core and lets interface drivers bind to them.
*/
}
}
dev_info(&usb_dev->dev, "authorized to connect\n");
-
-error_enumerate:
+error_configure:
error_device_descriptor:
- usb_autosuspend_device(usb_dev);
error_autoresume:
out_authorized:
usb_unlock_device(usb_dev); // complements locktree
USB_PORT_FEAT_C_SUSPEND);
udev = hdev->children[i-1];
if (udev) {
- /* TRSMRCY = 10 msec */
- msleep(10);
-
usb_lock_device(udev);
ret = remote_wakeup(hdev->
children[i-1]);
if (index <= 0)
return NULL;
- buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
+ buf = kmalloc(MAX_USB_STRING_SIZE, GFP_KERNEL);
if (buf) {
len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
if (len > 0) {
- smallbuf = kmalloc(++len, GFP_NOIO);
+ smallbuf = kmalloc(++len, GFP_KERNEL);
if (!smallbuf)
return buf;
memcpy(smallbuf, buf, len);
if (cp) {
nintf = cp->desc.bNumInterfaces;
new_interfaces = kmalloc(nintf * sizeof(*new_interfaces),
- GFP_NOIO);
+ GFP_KERNEL);
if (!new_interfaces) {
dev_err(&dev->dev, "Out of memory\n");
return -ENOMEM;
for (; n < nintf; ++n) {
new_interfaces[n] = kzalloc(
sizeof(struct usb_interface),
- GFP_NOIO);
+ GFP_KERNEL);
if (!new_interfaces[n]) {
dev_err(&dev->dev, "Out of memory\n");
ret = -ENOMEM;
struct device_attribute *attr, char *buf) \
{ \
struct usb_device *udev; \
- int retval; \
\
udev = to_usb_device(dev); \
- usb_lock_device(udev); \
- retval = sprintf(buf, "%s\n", udev->name); \
- usb_unlock_device(udev); \
- return retval; \
+ return sprintf(buf, "%s\n", udev->name); \
} \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
case USB_SPEED_HIGH:
speed = "480";
break;
- case USB_SPEED_VARIABLE:
- speed = "480";
- break;
- case USB_SPEED_SUPER:
- speed = "5000";
- break;
default:
speed = "unknown";
}
struct find_interface_arg {
int minor;
- struct device_driver *drv;
+ struct usb_interface *interface;
};
static int __find_interface(struct device *dev, void *data)
if (!is_usb_interface(dev))
return 0;
- if (dev->driver != arg->drv)
- return 0;
intf = to_usb_interface(dev);
- return intf->minor == arg->minor;
+ if (intf->minor != -1 && intf->minor == arg->minor) {
+ arg->interface = intf;
+ return 1;
+ }
+ return 0;
}
/**
* @drv: the driver whose current configuration is considered
* @minor: the minor number of the desired device
*
- * This walks the bus device list and returns a pointer to the interface
- * with the matching minor and driver. Note, this only works for devices
- * that share the USB major number.
+ * This walks the driver device list and returns a pointer to the interface
+ * with the matching minor. Note, this only works for devices that share the
+ * USB major number.
*/
struct usb_interface *usb_find_interface(struct usb_driver *drv, int minor)
{
struct find_interface_arg argb;
- struct device *dev;
+ int retval;
argb.minor = minor;
- argb.drv = &drv->drvwrap.driver;
-
- dev = bus_find_device(&usb_bus_type, NULL, &argb, __find_interface);
-
- /* Drop reference count from bus_find_device */
- put_device(dev);
-
- return dev ? to_usb_interface(dev) : NULL;
+ argb.interface = NULL;
+ /* eat the error, it will be in argb.interface */
+ retval = driver_for_each_device(&drv->drvwrap.driver, NULL, &argb,
+ __find_interface);
+ return argb.interface;
}
EXPORT_SYMBOL_GPL(usb_find_interface);
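[Editor's note: the rewritten usb_find_interface() above uses the common "iterate with a callback, stash the match in the argument struct" idiom -- the callback returns nonzero to stop the walk, and the result travels back through the cookie rather than the return value. A standalone sketch of that idiom; an array walk stands in for driver_for_each_device(), and all names are illustrative:]

    #include <stddef.h>
    #include <stdio.h>

    struct intf { int minor; };

    struct find_arg {
        int minor;              /* what we are looking for */
        struct intf *found;     /* filled in by the callback */
    };

    /* Returns nonzero to stop the iteration, like a bus/driver walk. */
    static int match_minor(struct intf *i, void *data)
    {
        struct find_arg *arg = data;

        if (i->minor != -1 && i->minor == arg->minor) {
            arg->found = i;
            return 1;
        }
        return 0;
    }

    static struct intf *find_interface(struct intf *v, size_t n, int minor)
    {
        struct find_arg arg = { .minor = minor, .found = NULL };
        size_t k;

        for (k = 0; k < n; k++)         /* stand-in for the bus walk */
            if (match_minor(&v[k], &arg))
                break;
        return arg.found;               /* NULL if nothing matched */
    }

    int main(void)
    {
        struct intf v[] = { { -1 }, { 3 }, { 7 } };
        struct intf *hit = find_interface(v, 3, 7);

        printf("minor=%d\n", hit ? hit->minor : -1);    /* prints 7 */
        return 0;
    }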
/* start 20 msec resume signaling from this port,
* and make khubd collect PORT_STAT_C_SUSPEND to
- * stop that signaling. Use 5 ms extra for safety,
- * like usb_port_resume() does.
+ * stop that signaling.
*/
- ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
+ ehci->reset_done [i] = jiffies + msecs_to_jiffies (20);
ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
}
del_timer_sync(&ehci->watchdog);
del_timer_sync(&ehci->iaa_watchdog);
+ port = HCS_N_PORTS (ehci->hcs_params);
spin_lock_irq (&ehci->lock);
- /* Once the controller is stopped, port resumes that are already
- * in progress won't complete. Hence if remote wakeup is enabled
- * for the root hub and any ports are in the middle of a resume or
- * remote wakeup, we must fail the suspend.
- */
- if (hcd->self.root_hub->do_remote_wakeup) {
- port = HCS_N_PORTS(ehci->hcs_params);
- while (port--) {
- if (ehci->reset_done[port] != 0) {
- spin_unlock_irq(&ehci->lock);
- ehci_dbg(ehci, "suspend failed because "
- "port %d is resuming\n",
- port + 1);
- return -EBUSY;
- }
- }
- }
-
/* stop schedules, clean any completed work */
if (HC_IS_RUNNING(hcd->state)) {
ehci_quiesce (ehci);
*/
ehci->bus_suspended = 0;
ehci->owned_ports = 0;
- port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
u32 __iomem *reg = &ehci->regs->port_status [port];
u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
* But interval 1 scheduling is simpler, and
* includes high bandwidth.
*/
- urb->interval = 1;
- } else if (qh->period > ehci->periodic_size) {
- qh->period = ehci->periodic_size;
- urb->interval = qh->period << 3;
+ dbg ("intr period %d uframes, NYET!",
+ urb->interval);
+ goto done;
}
} else {
int think_time;
usb_calc_bus_time (urb->dev->speed,
is_input, 0, max_packet (maxp)));
qh->period = urb->interval;
- if (qh->period > ehci->periodic_size) {
- qh->period = ehci->periodic_size;
- urb->interval = qh->period;
- }
}
}
#include <linux/usb.h>
#include <linux/platform_device.h>
#include <linux/io.h>
-#include <linux/mm.h>
#include <linux/irq.h>
-#include <asm/cacheflush.h>
#include "../core/hcd.h"
#include "r8a66597.h"
{
int port;
- /* disable interrupts */
r8a66597_write(r8a66597, 0, INTENB0);
- r8a66597_write(r8a66597, 0, INTENB1);
- r8a66597_write(r8a66597, 0, BRDYENB);
- r8a66597_write(r8a66597, 0, BEMPENB);
- r8a66597_write(r8a66597, 0, NRDYENB);
-
- /* clear status */
- r8a66597_write(r8a66597, 0, BRDYSTS);
- r8a66597_write(r8a66597, 0, NRDYSTS);
- r8a66597_write(r8a66597, 0, BEMPSTS);
+ r8a66597_write(r8a66597, 0, INTSTS0);
for (port = 0; port < r8a66597->max_root_hub; port++)
r8a66597_disable_port(r8a66597, port);
enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb);
}
-static void r8a66597_urb_done(struct r8a66597 *r8a66597, struct urb *urb,
- int status)
-__releases(r8a66597->lock)
-__acquires(r8a66597->lock)
-{
- if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
- void *ptr;
-
- for (ptr = urb->transfer_buffer;
- ptr < urb->transfer_buffer + urb->transfer_buffer_length;
- ptr += PAGE_SIZE)
- flush_dcache_page(virt_to_page(ptr));
- }
-
- usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb);
- spin_unlock(&r8a66597->lock);
- usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, status);
- spin_lock(&r8a66597->lock);
-}
-
/* this function must be called with interrupt disabled */
static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
{
list_del(&td->queue);
kfree(td);
- if (urb)
- r8a66597_urb_done(r8a66597, urb, -ENODEV);
+ if (urb) {
+ usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597),
+ urb);
+ spin_unlock(&r8a66597->lock);
+ usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb,
+ -ENODEV);
+ spin_lock(&r8a66597->lock);
+ }
break;
}
}
if (usb_pipeisoc(urb->pipe))
urb->start_frame = r8a66597_get_frame(hcd);
- r8a66597_urb_done(r8a66597, urb, status);
+ usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb);
+ spin_unlock(&r8a66597->lock);
+ usb_hcd_giveback_urb(hcd, urb, status);
+ spin_lock(&r8a66597->lock);
}
if (restart) {
r8a66597->rh_timer.data = (unsigned long)r8a66597;
r8a66597->reg = (unsigned long)reg;
- /* make sure no interrupts are pending */
- ret = r8a66597_clock_enable(r8a66597);
- if (ret < 0)
- goto clean_up3;
- disable_controller(r8a66597);
-
for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) {
INIT_LIST_HEAD(&r8a66597->pipe_queue[i]);
init_timer(&r8a66597->td_timer[i]);
spin_lock_irq(&uhci->lock);
if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))
rc = -ESHUTDOWN;
- else if (uhci->dead)
- ; /* Dead controllers tell no tales */
-
- /* Once the controller is stopped, port resumes that are already
- * in progress won't complete. Hence if remote wakeup is enabled
- * for the root hub and any ports are in the middle of a resume or
- * remote wakeup, we must fail the suspend.
- */
- else if (hcd->self.root_hub->do_remote_wakeup &&
- uhci->resuming_ports) {
- dev_dbg(uhci_dev(uhci), "suspend failed because a port "
- "is resuming\n");
- rc = -EBUSY;
- } else
+ else if (!uhci->dead)
suspend_rh(uhci, UHCI_RH_SUSPENDED);
spin_unlock_irq(&uhci->lock);
return rc;
/* Port received a wakeup request */
set_bit(port, &uhci->resuming_ports);
uhci->ports_timeout = jiffies +
- msecs_to_jiffies(25);
+ msecs_to_jiffies(20);
/* Make sure we see the port again
* after the resuming period is over. */
struct usb_device *udev; /* usb device */
struct urb *urb; /* usb request block */
struct backlight_device *bd; /* backlight device */
- u8 *urbdata; /* interrupt URB data buffer */
- u8 *msgdata; /* control message data buffer */
+ char *urbdata; /* interrupt URB data buffer */
+ char *msgdata; /* control message data buffer */
struct delayed_work work;
int button_pressed;
err("%s - error loading firmware: error = %d", __func__, err);
goto wraperr;
}
- } while (rec);
+ } while (i > 0);
/* Assert reset (stop the CPU in the EMI) */
err = emi62_set_reset(dev,1);
static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req)
{
musb_g_giveback(&musb->endpoints[0].ep_in, req, 0);
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
}
/*
musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
break;
default:
- ERR("SetupEnd came in a wrong ep0stage %s\n",
+ ERR("SetupEnd came in a wrong ep0stage %s",
decode_ep0stage(musb->ep0_state));
}
csr = musb_readw(regs, MUSB_CSR0);
handled = service_zero_data_request(
musb, &setup);
- /*
- * We're expecting no data in any case, so
- * always set the DATAEND bit -- doing this
- * here helps avoid SetupEnd interrupt coming
- * in the idle stage when we're stalling...
- */
- musb->ackpend |= MUSB_CSR0_P_DATAEND;
-
/* status stage might be immediate */
- if (handled > 0)
+ if (handled > 0) {
+ musb->ackpend |= MUSB_CSR0_P_DATAEND;
musb->ep0_state =
MUSB_EP0_STAGE_STATUSIN;
+ }
break;
/* sequence #1 (IN to host), includes GET_STATUS
{ USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) },
- { USB_DEVICE(BANDB_VID, BANDB_USOPTL4_PID) },
- { USB_DEVICE(BANDB_VID, BANDB_USPTL4_PID) },
- { USB_DEVICE(BANDB_VID, BANDB_USO9ML2DR_2_PID) },
- { USB_DEVICE(BANDB_VID, BANDB_USO9ML2DR_PID) },
- { USB_DEVICE(BANDB_VID, BANDB_USOPTL4DR2_PID) },
- { USB_DEVICE(BANDB_VID, BANDB_USOPTL4DR_PID) },
- { USB_DEVICE(BANDB_VID, BANDB_485USB9F_2W_PID) },
- { USB_DEVICE(BANDB_VID, BANDB_485USB9F_4W_PID) },
- { USB_DEVICE(BANDB_VID, BANDB_232USB9M_PID) },
- { USB_DEVICE(BANDB_VID, BANDB_485USBTB_2W_PID) },
- { USB_DEVICE(BANDB_VID, BANDB_485USBTB_4W_PID) },
- { USB_DEVICE(BANDB_VID, BANDB_TTL5USB9M_PID) },
- { USB_DEVICE(BANDB_VID, BANDB_TTL3USB9M_PID) },
- { USB_DEVICE(BANDB_VID, BANDB_ZZ_PROG1_USB_PID) },
{ USB_DEVICE(FTDI_VID, EVER_ECO_PRO_CDS) },
{ USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_2_PID) },
#define BANDB_USOTL4_PID 0xAC01 /* USOTL4 Isolated RS-485 Converter */
#define BANDB_USTL4_PID 0xAC02 /* USTL4 RS-485 Converter */
#define BANDB_USO9ML2_PID 0xAC03 /* USO9ML2 Isolated RS-232 Converter */
-#define BANDB_USOPTL4_PID 0xAC11
-#define BANDB_USPTL4_PID 0xAC12
-#define BANDB_USO9ML2DR_2_PID 0xAC16
-#define BANDB_USO9ML2DR_PID 0xAC17
-#define BANDB_USOPTL4DR2_PID 0xAC18 /* USOPTL4R-2 2-port Isolated RS-232 Converter */
-#define BANDB_USOPTL4DR_PID 0xAC19
-#define BANDB_485USB9F_2W_PID 0xAC25
-#define BANDB_485USB9F_4W_PID 0xAC26
-#define BANDB_232USB9M_PID 0xAC27
-#define BANDB_485USBTB_2W_PID 0xAC33
-#define BANDB_485USBTB_4W_PID 0xAC34
-#define BANDB_TTL5USB9M_PID 0xAC49
-#define BANDB_TTL3USB9M_PID 0xAC50
-#define BANDB_ZZ_PROG1_USB_PID 0xBA02
/*
* RM Michaelides CANview USB (http://www.rmcan.com)
dbg("%s - port %d", __func__, port->number);
if (port->serial->type->max_in_flight_urbs) {
- kfree(urb->transfer_buffer);
-
spin_lock_irqsave(&port->lock, flags);
--port->urbs_in_flight;
port->tx_bytes_flight -= urb->transfer_buffer_length;
* moschip_id_table_combined
*/
#define USB_VENDOR_ID_BANDB 0x0856
-#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22
-#define BANDB_DEVICE_ID_USO9ML2_4 0xAC24
-#define BANDB_DEVICE_ID_US9ML2_2 0xAC29
-#define BANDB_DEVICE_ID_US9ML2_4 0xAC30
-#define BANDB_DEVICE_ID_USPTL4_2 0xAC31
-#define BANDB_DEVICE_ID_USPTL4_4 0xAC32
-#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42
#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
+#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42
/* This driver also supports
* ATEN UC2324 device using Moschip MCS7840
static struct usb_device_id moschip_port_id_table[] = {
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
{} /* terminating entry */
static __devinitdata struct usb_device_id moschip_id_table_combined[] = {
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
{} /* terminating entry */
#define FOUR_G_SYSTEMS_VENDOR_ID 0x1c9e
#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
-/* Haier products */
-#define HAIER_VENDOR_ID 0x201e
-#define HAIER_PRODUCT_CE100 0x2009
-
-/* Thinkwill products */
-#define THINKWILL_VENDOR_ID 0x19f5
-#define THINKWILL_PRODUCT_ID 0x9909
-
static struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0144, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0145, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0146, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0148, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0149, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0150, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0151, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0154, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
{ USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) },
{ USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */
{ USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
- { USB_DEVICE(ALINK_VENDOR_ID, 0xce16) },
{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) },
{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
{ USB_DEVICE(FOUR_G_SYSTEMS_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14) },
- { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
* to wait for at least one CHECK_CONDITION to determine
* SANE_SENSE support
*/
- if (unlikely((srb->cmnd[0] == ATA_16 || srb->cmnd[0] == ATA_12) &&
+ if ((srb->cmnd[0] == ATA_16 || srb->cmnd[0] == ATA_12) &&
result == USB_STOR_TRANSPORT_GOOD &&
!(us->fflags & US_FL_SANE_SENSE) &&
- !(us->fflags & US_FL_BAD_SENSE) &&
- !(srb->cmnd[2] & 0x20))) {
+ !(srb->cmnd[2] & 0x20)) {
US_DEBUGP("-- SAT supported, increasing auto-sense\n");
us->fflags |= US_FL_SANE_SENSE;
}
if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
US_DEBUGP("-- auto-sense aborted\n");
srb->result = DID_ABORT << 16;
-
- /* If SANE_SENSE caused this problem, disable it */
- if (sense_size != US_SENSE_SIZE) {
- us->fflags &= ~US_FL_SANE_SENSE;
- us->fflags |= US_FL_BAD_SENSE;
- }
goto Handle_Errors;
}
* (small) sense request. This fixes some USB GSM modems
*/
if (temp_result == USB_STOR_TRANSPORT_FAILED &&
- sense_size != US_SENSE_SIZE) {
+ (us->fflags & US_FL_SANE_SENSE) &&
+ sense_size != US_SENSE_SIZE) {
US_DEBUGP("-- auto-sense failure, retry small sense\n");
sense_size = US_SENSE_SIZE;
- us->fflags &= ~US_FL_SANE_SENSE;
- us->fflags |= US_FL_BAD_SENSE;
goto Retry_Sense;
}
*/
if (srb->sense_buffer[7] > (US_SENSE_SIZE - 8) &&
!(us->fflags & US_FL_SANE_SENSE) &&
- !(us->fflags & US_FL_BAD_SENSE) &&
(srb->sense_buffer[0] & 0x7C) == 0x70) {
US_DEBUGP("-- SANE_SENSE support enabled\n");
us->fflags |= US_FL_SANE_SENSE;
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY ),
-/* Reported by Daniel Kukula <daniel.kuku@gmail.com> */
-UNUSUAL_DEV( 0x067b, 0x1063, 0x0100, 0x0100,
- "Prolific Technology, Inc.",
- "Prolific Storage Gadget",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_BAD_SENSE ),
-
/* Reported by Rogerio Brito <rbrito@ime.usp.br> */
UNUSUAL_DEV( 0x067b, 0x2317, 0x0001, 0x001,
"Prolific Technology, Inc.",
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_GO_SLOW ),
+/* Reported by Rohan Hart <rohan.hart17@gmail.com> */
+UNUSUAL_DEV( 0x2770, 0x915d, 0x0010, 0x0010,
+ "INTOVA",
+ "Pixtreme",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_FIX_CAPACITY ),
+
/* Reported by Frederic Marchal <frederic.marchal@wowcompany.com>
* Mio Moov 330
*/
if (data_len<36) // You lose.
return;
- memset(data+8, ' ', 28);
if(data[0]&0x20) { /* USB device currently not connected. Return
peripheral qualifier 001b ("...however, the
physical device is not currently connected
device, it may return zeros or ASCII spaces
(20h) in those fields until the data is
available from the device."). */
+ memset(data+8,0,28);
} else {
u16 bcdDevice = le16_to_cpu(us->pusb_dev->descriptor.bcdDevice);
- int n;
-
- n = strlen(us->unusual_dev->vendorName);
- memcpy(data+8, us->unusual_dev->vendorName, min(8, n));
- n = strlen(us->unusual_dev->productName);
- memcpy(data+16, us->unusual_dev->productName, min(16, n));
-
+ memcpy(data+8, us->unusual_dev->vendorName,
+ strlen(us->unusual_dev->vendorName) > 8 ? 8 :
+ strlen(us->unusual_dev->vendorName));
+ memcpy(data+16, us->unusual_dev->productName,
+ strlen(us->unusual_dev->productName) > 16 ? 16 :
+ strlen(us->unusual_dev->productName));
data[32] = 0x30 + ((bcdDevice>>12) & 0x0F);
data[33] = 0x30 + ((bcdDevice>>8) & 0x0F);
data[34] = 0x30 + ((bcdDevice>>4) & 0x0F);
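[Editor's note: either variant of this hunk is filling the fixed-width text fields of a 36-byte SCSI INQUIRY response: an 8-byte vendor at offset 8, a 16-byte product at offset 16, and a 4-digit ASCII revision derived from the BCD-coded bcdDevice at offsets 32-35. A standalone sketch of that layout, with space padding per the SCSI convention:]

    #include <stdio.h>
    #include <string.h>

    typedef unsigned char u8;
    typedef unsigned short u16;

    /* Fill bytes 8..35 of a 36-byte INQUIRY response. */
    static void fill_inquiry(u8 *data, const char *vendor,
                             const char *product, u16 bcdDevice)
    {
        size_t n;

        memset(data + 8, ' ', 28);      /* space-pad all text fields */

        n = strlen(vendor);
        memcpy(data + 8, vendor, n < 8 ? n : 8);        /* 8-byte vendor */
        n = strlen(product);
        memcpy(data + 16, product, n < 16 ? n : 16);    /* 16-byte product */

        /* bcdDevice 0x0123 becomes the ASCII revision "0123" */
        data[32] = '0' + ((bcdDevice >> 12) & 0x0F);
        data[33] = '0' + ((bcdDevice >> 8) & 0x0F);
        data[34] = '0' + ((bcdDevice >> 4) & 0x0F);
        data[35] = '0' + (bcdDevice & 0x0F);
    }

    int main(void)
    {
        u8 data[36] = { 0 };

        fill_inquiry(data, "ACME", "FlashDisk", 0x0123);
        printf("%.8s|%.16s|%.4s\n", data + 8, data + 16, data + 32);
        return 0;
    }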
u16 vid = le16_to_cpu(us->pusb_dev->descriptor.idVendor);
u16 pid = le16_to_cpu(us->pusb_dev->descriptor.idProduct);
unsigned f = 0;
- unsigned int mask = (US_FL_SANE_SENSE | US_FL_BAD_SENSE |
- US_FL_FIX_CAPACITY |
+ unsigned int mask = (US_FL_SANE_SENSE | US_FL_FIX_CAPACITY |
US_FL_CAPACITY_HEURISTICS | US_FL_IGNORE_DEVICE |
US_FL_NOT_LOCKABLE | US_FL_MAX_SECTORS_64 |
US_FL_CAPACITY_OK | US_FL_IGNORE_RESIDUE |
case 'a':
f |= US_FL_SANE_SENSE;
break;
- case 'b':
- f |= US_FL_BAD_SENSE;
- break;
case 'c':
f |= US_FL_FIX_CAPACITY;
break;
*/
static int imxfb_suspend(struct platform_device *dev, pm_message_t state)
{
- struct fb_info *info = platform_get_drvdata(dev);
- struct imxfb_info *fbi = info->par;
+ struct imxfb_info *fbi = platform_get_drvdata(dev);
pr_debug("%s\n", __func__);
static int imxfb_resume(struct platform_device *dev)
{
- struct fb_info *info = platform_get_drvdata(dev);
- struct imxfb_info *fbi = info->par;
+ struct imxfb_info *fbi = platform_get_drvdata(dev);
pr_debug("%s\n", __func__);
M1064_XDVICLKCTRL_C1DVICLKEN |
M1064_XDVICLKCTRL_DVILOOPCTL |
M1064_XDVICLKCTRL_P1LOOPBWDTCTL;
- /* Setting this breaks PC systems so don't do it */
- /* matroxfb_DAC_out(minfo, M1064_XDVICLKCTRL, tmp); */
+ matroxfb_DAC_out(minfo, M1064_XDVICLKCTRL, tmp);
matroxfb_DAC_out(minfo, M1064_XPWRCTRL,
xpwrctrl);
unsigned long flags;
dma_cookie_t cookie;
- if (mx3_fbi->txd)
- dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi,
- to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg);
- else
- dev_dbg(mx3fb->dev, "mx3fbi %p, txd = NULL\n", mx3_fbi);
+ dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi,
+ to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg);
/* This enables the channel */
if (mx3_fbi->cookie < 0) {
static void sdc_set_brightness(struct mx3fb_data *mx3fb, uint8_t value)
{
- dev_dbg(mx3fb->dev, "%s: value = %d\n", __func__, value);
/* This might be board-specific */
mx3fb_write_reg(mx3fb, 0x03000000UL | value << 16, SDC_PWM_CTRL);
return;
goto ersdc0;
}
- mx3fb->backlight_level = 255;
-
ret = init_fb_chan(mx3fb, to_idmac_chan(chan));
if (ret < 0)
goto eisdc0;
+ mx3fb->backlight_level = 255;
+
return 0;
eisdc0:
/**
* s3c_fb_calc_pixclk() - calculate the divider to create the pixel clock.
+ * @id: window id.
* @sfb: The hardware state.
* @pixclock: The pixel clock wanted, in picoseconds.
*
* Given the specified pixel clock, work out the necessary divider to get
* close to the output frequency.
*/
-static int s3c_fb_calc_pixclk(struct s3c_fb *sfb, unsigned int pixclk)
+static int s3c_fb_calc_pixclk(unsigned char id, struct s3c_fb *sfb, unsigned int pixclk)
{
+ struct s3c_fb_pd_win *win = sfb->pdata->win[id];
unsigned long clk = clk_get_rate(sfb->bus_clk);
- unsigned long long tmp;
unsigned int result;
- tmp = (unsigned long long)clk;
- tmp *= pixclk;
-
- do_div(tmp, 1000000000UL);
- result = (unsigned int)tmp / 1000;
+ pixclk *= win->win_mode.refresh;
+ result = clk / pixclk;
dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n",
pixclk, clk, result, clk / result);
/* use window 0 as the basis for the lcd output timings */
if (win_no == 0) {
- clkdiv = s3c_fb_calc_pixclk(sfb, var->pixclock);
+ clkdiv = s3c_fb_calc_pixclk(win_no, sfb, var->pixclock);
data = sfb->pdata->vidcon0;
data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
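[Editor's note: the 64-bit arithmetic in the removed branch follows the fb_var_screeninfo convention: pixclock is a period in picoseconds, so the target pixel frequency is 10^12 / pixclock Hz, and the divider off the bus clock is clk * pixclock / 10^12. A worked standalone version of that computation; the clock values are hypothetical:]

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t clk = 133000000;   /* bus clock in Hz (hypothetical) */
        uint64_t pixclock = 38461;  /* pixel period in picoseconds */

        /* divider = clk / f_pix, with f_pix = 1e12 / pixclock */
        uint64_t tmp = clk * pixclock;  /* needs 64 bits: ~5.1e12 here */
        tmp /= 1000000000ULL;           /* the kernel's do_div() step */
        unsigned int div = (unsigned int)(tmp / 1000);

        /* div = 5; actual pixel clock = 133 MHz / 5 = 26.6 MHz,
         * close to the requested 1e12/38461 ~= 26.0 MHz */
        printf("div=%u (~%llu Hz)\n", div,
               (unsigned long long)(clk / div));
        return 0;
    }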
/*
- * intel TCO Watchdog Driver
+ * intel TCO Watchdog Driver (Used in i82801 and i63xxESB chipsets)
*
* (c) Copyright 2006-2009 Wim Van Sebroeck <wim@iguana.be>.
*
*
* The TCO watchdog is implemented in the following I/O controller hubs:
* (See the intel documentation on http://developer.intel.com.)
- * document number 290655-003, 290677-014: 82801AA (ICH), 82801AB (ICHO)
- * document number 290687-002, 298242-027: 82801BA (ICH2)
- * document number 290733-003, 290739-013: 82801CA (ICH3-S)
- * document number 290716-001, 290718-007: 82801CAM (ICH3-M)
- * document number 290744-001, 290745-025: 82801DB (ICH4)
- * document number 252337-001, 252663-008: 82801DBM (ICH4-M)
- * document number 273599-001, 273645-002: 82801E (C-ICH)
- * document number 252516-001, 252517-028: 82801EB (ICH5), 82801ER (ICH5R)
- * document number 300641-004, 300884-013: 6300ESB
- * document number 301473-002, 301474-026: 82801F (ICH6)
- * document number 313082-001, 313075-006: 631xESB, 632xESB
- * document number 307013-003, 307014-024: 82801G (ICH7)
- * document number 313056-003, 313057-017: 82801H (ICH8)
- * document number 316972-004, 316973-012: 82801I (ICH9)
- * document number 319973-002, 319974-002: 82801J (ICH10)
- * document number 322169-001, 322170-003: 5 Series, 3400 Series (PCH)
- * document number 320066-003, 320257-008: EP80597 (IICH)
- * document number TBD : Cougar Point (CPT)
+ * 82801AA (ICH) : document number 290655-003, 290677-014,
+ * 82801AB (ICHO) : document number 290655-003, 290677-014,
+ * 82801BA (ICH2) : document number 290687-002, 298242-027,
+ * 82801BAM (ICH2-M) : document number 290687-002, 298242-027,
+ * 82801CA (ICH3-S) : document number 290733-003, 290739-013,
+ * 82801CAM (ICH3-M) : document number 290716-001, 290718-007,
+ * 82801DB (ICH4) : document number 290744-001, 290745-025,
+ * 82801DBM (ICH4-M) : document number 252337-001, 252663-008,
+ * 82801E (C-ICH) : document number 273599-001, 273645-002,
+ * 82801EB (ICH5) : document number 252516-001, 252517-028,
+ * 82801ER (ICH5R) : document number 252516-001, 252517-028,
+ * 6300ESB (6300ESB) : document number 300641-004, 300884-013,
+ * 82801FB (ICH6) : document number 301473-002, 301474-026,
+ * 82801FR (ICH6R) : document number 301473-002, 301474-026,
+ * 82801FBM (ICH6-M) : document number 301473-002, 301474-026,
+ * 82801FW (ICH6W) : document number 301473-001, 301474-026,
+ * 82801FRW (ICH6RW) : document number 301473-001, 301474-026,
+ * 631xESB (631xESB) : document number 313082-001, 313075-006,
+ * 632xESB (632xESB) : document number 313082-001, 313075-006,
+ * 82801GB (ICH7) : document number 307013-003, 307014-024,
+ * 82801GR (ICH7R) : document number 307013-003, 307014-024,
+ * 82801GDH (ICH7DH) : document number 307013-003, 307014-024,
+ * 82801GBM (ICH7-M) : document number 307013-003, 307014-024,
+ * 82801GHM (ICH7-M DH) : document number 307013-003, 307014-024,
+ * 82801GU (ICH7-U) : document number 307013-003, 307014-024,
+ * 82801HB (ICH8) : document number 313056-003, 313057-017,
+ * 82801HR (ICH8R) : document number 313056-003, 313057-017,
+ * 82801HBM (ICH8M) : document number 313056-003, 313057-017,
+ * 82801HH (ICH8DH) : document number 313056-003, 313057-017,
+ * 82801HO (ICH8DO) : document number 313056-003, 313057-017,
+ * 82801HEM (ICH8M-E) : document number 313056-003, 313057-017,
+ * 82801IB (ICH9) : document number 316972-004, 316973-012,
+ * 82801IR (ICH9R) : document number 316972-004, 316973-012,
+ * 82801IH (ICH9DH) : document number 316972-004, 316973-012,
+ * 82801IO (ICH9DO) : document number 316972-004, 316973-012,
+ * 82801IBM (ICH9M) : document number 316972-004, 316973-012,
+ * 82801IEM (ICH9M-E) : document number 316972-004, 316973-012,
+ * 82801JIB (ICH10) : document number 319973-002, 319974-002,
+ * 82801JIR (ICH10R) : document number 319973-002, 319974-002,
+ * 82801JD (ICH10D) : document number 319973-002, 319974-002,
+ * 82801JDO (ICH10DO) : document number 319973-002, 319974-002
*/
/*
TCO_ICH10R, /* ICH10R */
TCO_ICH10D, /* ICH10D */
TCO_ICH10DO, /* ICH10DO */
- TCO_PCH, /* PCH Desktop Full Featured */
- TCO_PCHM, /* PCH Mobile Full Featured */
- TCO_P55, /* P55 */
- TCO_PM55, /* PM55 */
- TCO_H55, /* H55 */
- TCO_QM57, /* QM57 */
- TCO_H57, /* H57 */
- TCO_HM55, /* HM55 */
- TCO_Q57, /* Q57 */
- TCO_HM57, /* HM57 */
- TCO_PCHMSFF, /* PCH Mobile SFF Full Featured */
- TCO_QS57, /* QS57 */
- TCO_3400, /* 3400 */
- TCO_3420, /* 3420 */
- TCO_3450, /* 3450 */
- TCO_EP80579, /* EP80579 */
- TCO_CPTD, /* CPT Desktop */
- TCO_CPTM, /* CPT Mobile */
};
static struct {
{"ICH10R", 2},
{"ICH10D", 2},
{"ICH10DO", 2},
- {"PCH Desktop Full Featured", 2},
- {"PCH Mobile Full Featured", 2},
- {"P55", 2},
- {"PM55", 2},
- {"H55", 2},
- {"QM57", 2},
- {"H57", 2},
- {"HM55", 2},
- {"Q57", 2},
- {"HM57", 2},
- {"PCH Mobile SFF Full Featured", 2},
- {"QS57", 2},
- {"3400", 2},
- {"3420", 2},
- {"3450", 2},
- {"EP80579", 2},
- {"CPT Desktop", 2},
- {"CPT Mobile", 2},
{NULL, 0}
};
{ ITCO_PCI_DEVICE(0x3a16, TCO_ICH10R)},
{ ITCO_PCI_DEVICE(0x3a1a, TCO_ICH10D)},
{ ITCO_PCI_DEVICE(0x3a14, TCO_ICH10DO)},
- { ITCO_PCI_DEVICE(0x3b00, TCO_PCH)},
- { ITCO_PCI_DEVICE(0x3b01, TCO_PCHM)},
- { ITCO_PCI_DEVICE(0x3b02, TCO_P55)},
- { ITCO_PCI_DEVICE(0x3b03, TCO_PM55)},
- { ITCO_PCI_DEVICE(0x3b06, TCO_H55)},
- { ITCO_PCI_DEVICE(0x3b07, TCO_QM57)},
- { ITCO_PCI_DEVICE(0x3b08, TCO_H57)},
- { ITCO_PCI_DEVICE(0x3b09, TCO_HM55)},
- { ITCO_PCI_DEVICE(0x3b0a, TCO_Q57)},
- { ITCO_PCI_DEVICE(0x3b0b, TCO_HM57)},
- { ITCO_PCI_DEVICE(0x3b0d, TCO_PCHMSFF)},
- { ITCO_PCI_DEVICE(0x3b0f, TCO_QS57)},
- { ITCO_PCI_DEVICE(0x3b12, TCO_3400)},
- { ITCO_PCI_DEVICE(0x3b14, TCO_3420)},
- { ITCO_PCI_DEVICE(0x3b16, TCO_3450)},
- { ITCO_PCI_DEVICE(0x5031, TCO_EP80579)},
- { ITCO_PCI_DEVICE(0x1c42, TCO_CPTD)},
- { ITCO_PCI_DEVICE(0x1c43, TCO_CPTM)},
{ 0, }, /* End of list */
};
MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
/* We aim for 'current allocation' == 'target allocation'. */
unsigned long current_pages;
unsigned long target_pages;
+ /* We may hit the hard limit in Xen. If we do then we remember it. */
+ unsigned long hard_limit;
/*
* Drivers may alter the memory reservation independently, but they
* must inform the balloon driver so we avoid hitting the hard limit.
list_add(&page->lru, &ballooned_pages);
balloon_stats.balloon_low++;
}
-
- totalram_pages--;
}
/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
else
balloon_stats.balloon_low--;
- totalram_pages++;
-
return page;
}
static unsigned long current_target(void)
{
- unsigned long target = balloon_stats.target_pages;
+ unsigned long target = min(balloon_stats.target_pages, balloon_stats.hard_limit);
target = min(target,
balloon_stats.current_pages +
set_xen_guest_handle(reservation.extent_start, frame_list);
reservation.nr_extents = nr_pages;
rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
- if (rc < 0)
+ if (rc < nr_pages) {
+ if (rc > 0) {
+ int ret;
+
+ /* We hit the Xen hard limit: reprobe. */
+ reservation.nr_extents = rc;
+ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+ &reservation);
+ BUG_ON(ret != rc);
+ }
+ if (rc >= 0)
+ balloon_stats.hard_limit = (balloon_stats.current_pages + rc -
+ balloon_stats.driver_pages);
goto out;
+ }
- for (i = 0; i < rc; i++) {
+ for (i = 0; i < nr_pages; i++) {
page = balloon_retrieve();
BUG_ON(page == NULL);
__free_page(page);
}
- balloon_stats.current_pages += rc;
+ balloon_stats.current_pages += nr_pages;
+ totalram_pages = balloon_stats.current_pages;
out:
spin_unlock_irqrestore(&balloon_lock, flags);
- return rc < 0 ? rc : rc != nr_pages;
+ return 0;
}
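[Editor's note: the restored hard-limit handling above treats a short return from XENMEM_populate_physmap as a probe: give the partial grant straight back, and remember the implied ceiling so current_target() can clamp to it. The shape of that logic as a standalone sketch -- the fake allocator stands in for the hypercall, and the driver_pages accounting is omitted for brevity:]

    #include <stdio.h>

    static unsigned long avail = 100;   /* fake hypervisor free pool */

    /* Grants up to 'want' pages; may return fewer (or 0). */
    static long grant_pages(unsigned long want)
    {
        long got = want > avail ? (long)avail : (long)want;
        avail -= got;
        return got;
    }

    static void release_pages(unsigned long n)
    {
        avail += n;
    }

    int main(void)
    {
        unsigned long current_pages = 500;
        unsigned long hard_limit = ~0UL;    /* "unknown" sentinel */
        unsigned long want = 150;

        long rc = grant_pages(want);
        if (rc < (long)want) {
            if (rc > 0)
                release_pages(rc);  /* undo the partial grant */
            /* ceiling = what we had plus what the pool could still add */
            if (rc >= 0)
                hard_limit = current_pages + rc;
        } else {
            current_pages += rc;
        }
        printf("current=%lu limit=%lu\n", current_pages, hard_limit);
        return 0;
    }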
static int decrease_reservation(unsigned long nr_pages)
BUG_ON(ret != nr_pages);
balloon_stats.current_pages -= nr_pages;
+ totalram_pages = balloon_stats.current_pages;
spin_unlock_irqrestore(&balloon_lock, flags);
static void balloon_set_new_target(unsigned long target)
{
/* No need for lock. Not read-modify-write updates. */
+ balloon_stats.hard_limit = ~0UL;
balloon_stats.target_pages = target;
schedule_work(&balloon_worker);
}
pr_info("xen_balloon: Initialising balloon driver.\n");
balloon_stats.current_pages = min(xen_start_info->nr_pages, max_pfn);
+ totalram_pages = balloon_stats.current_pages;
balloon_stats.target_pages = balloon_stats.current_pages;
balloon_stats.balloon_low = 0;
balloon_stats.balloon_high = 0;
balloon_stats.driver_pages = 0UL;
+ balloon_stats.hard_limit = ~0UL;
init_timer(&balloon_timer);
balloon_timer.data = 0;
BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages));
BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low));
BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high));
+BALLOON_SHOW(hard_limit_kb,
+ (balloon_stats.hard_limit!=~0UL) ? "%lu\n" : "???\n",
+ (balloon_stats.hard_limit!=~0UL) ? PAGES2KB(balloon_stats.hard_limit) : 0);
BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(balloon_stats.driver_pages));
static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr,
&attr_current_kb.attr,
&attr_low_kb.attr,
&attr_high_kb.attr,
+ &attr_hard_limit_kb.attr,
&attr_driver_kb.attr,
NULL
};
bind_evtchn_to_cpu(evtchn, 0);
evtchn_to_irq[evtchn] = -1;
- }
-
- if (irq_info[irq].type != IRQT_UNBOUND) {
irq_info[irq] = mk_unbound_info();
dynamic_irq_cleanup(irq);
if (err) {
printk(KERN_ERR "xen_suspend: sysdev_suspend failed: %d\n",
err);
+ dpm_resume_noirq(PMSG_RESUME);
return err;
}
}
sysdev_resume();
+ dpm_resume_noirq(PMSG_RESUME);
return 0;
}
shutting_down = SHUTDOWN_SUSPEND;
- err = stop_machine_create();
- if (err) {
- printk(KERN_ERR "xen suspend: failed to setup stop_machine %d\n", err);
- goto out;
- }
-
#ifdef CONFIG_PREEMPT
/* If the kernel is preemptible, we need to freeze all the processes
to prevent them from being in the middle of a pagetable update
err = freeze_processes();
if (err) {
printk(KERN_ERR "xen suspend: freeze failed %d\n", err);
- goto out_destroy_sm;
+ return;
}
#endif
err = dpm_suspend_start(PMSG_SUSPEND);
if (err) {
printk(KERN_ERR "xen suspend: dpm_suspend_start %d\n", err);
- goto out_thaw;
+ goto out;
}
printk(KERN_DEBUG "suspending xenstore...\n");
err = dpm_suspend_noirq(PMSG_SUSPEND);
if (err) {
printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err);
- goto out_resume;
+ goto resume_devices;
}
err = stop_machine(xen_suspend, &cancelled, cpumask_of(0));
-
- dpm_resume_noirq(PMSG_RESUME);
-
if (err) {
printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
- cancelled = 1;
+ goto out;
}
-out_resume:
if (!cancelled) {
xen_arch_resume();
xs_resume();
} else
xs_suspend_cancel();
+ dpm_resume_noirq(PMSG_RESUME);
+
+resume_devices:
dpm_resume_end(PMSG_RESUME);
/* Make sure timer events get retriggered on all CPUs */
clock_was_set();
-
-out_thaw:
+out:
#ifdef CONFIG_PREEMPT
thaw_processes();
-
-out_destroy_sm:
#endif
- stop_machine_destroy();
-
-out:
shutting_down = SHUTDOWN_INVALID;
}
#endif /* CONFIG_PM_SLEEP */
{
return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
}
-static DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL);
+DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL);
static ssize_t xendev_show_devtype(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
}
-static DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL);
+DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL);
static ssize_t xendev_show_modalias(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype);
}
-static DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL);
+DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL);
int xenbus_probe_node(struct xen_bus_type *bus,
const char *type,
MODULE_LICENSE("GPL");
-static int is_device_connecting(struct device *dev, void *data)
+static int is_disconnected_device(struct device *dev, void *data)
{
struct xenbus_device *xendev = to_xenbus_device(dev);
struct device_driver *drv = data;
return 0;
xendrv = to_xenbus_driver(dev->driver);
- return (xendev->state < XenbusStateConnected ||
- (xendev->state == XenbusStateConnected &&
- xendrv->is_ready && !xendrv->is_ready(xendev)));
+ return (xendev->state != XenbusStateConnected ||
+ (xendrv->is_ready && !xendrv->is_ready(xendev)));
}
-static int exists_connecting_device(struct device_driver *drv)
+static int exists_disconnected_device(struct device_driver *drv)
{
return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
- is_device_connecting);
+ is_disconnected_device);
}
static int print_device_status(struct device *dev, void *data)
/* Information only: is this too noisy? */
printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
xendev->nodename);
- } else if (xendev->state < XenbusStateConnected) {
- enum xenbus_state rstate = XenbusStateUnknown;
- if (xendev->otherend)
- rstate = xenbus_read_driver_state(xendev->otherend);
+ } else if (xendev->state != XenbusStateConnected) {
printk(KERN_WARNING "XENBUS: Timeout connecting "
- "to device: %s (local state %d, remote state %d)\n",
- xendev->nodename, xendev->state, rstate);
+ "to device: %s (state %d)\n",
+ xendev->nodename, xendev->state);
}
return 0;
static int ready_to_wait_for_devices;
/*
- * On a 5-minute timeout, wait for all devices currently configured. We need
+ * On a 10 second timeout, wait for all devices currently configured. We need
* to do this to guarantee that the filesystems and / or network devices
* needed for boot are available, before we can allow the boot to proceed.
*
*/
static void wait_for_devices(struct xenbus_driver *xendrv)
{
- unsigned long start = jiffies;
+ unsigned long timeout = jiffies + 10*HZ;
struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
- unsigned int seconds_waited = 0;
if (!ready_to_wait_for_devices || !xen_domain())
return;
- while (exists_connecting_device(drv)) {
- if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
- if (!seconds_waited)
- printk(KERN_WARNING "XENBUS: Waiting for "
- "devices to initialise: ");
- seconds_waited += 5;
- printk("%us...", 300 - seconds_waited);
- if (seconds_waited == 300)
- break;
- }
-
+ while (exists_disconnected_device(drv)) {
+ if (time_after(jiffies, timeout))
+ break;
schedule_timeout_interruptible(HZ/10);
}
- if (seconds_waited)
- printk("\n");
-
bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
print_device_status);
}
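[Editor's note: the restored loop is the standard jiffies timeout idiom: compute a deadline once, then poll with time_after(), which compares via signed subtraction so it stays correct across counter wraparound. A sketch of that comparison with a 32-bit counter; the kernel's own macro lives in <linux/jiffies.h>:]

    #include <stdio.h>
    #include <stdint.h>

    /* Same trick as the kernel's time_after(): a signed difference
     * gives the right answer even after the counter has wrapped. */
    static int time_after(uint32_t a, uint32_t b)
    {
        return (int32_t)(b - a) < 0;
    }

    int main(void)
    {
        uint32_t jiffies = 0xfffffff0u;         /* about to wrap */
        uint32_t timeout = jiffies + 10 * 100;  /* +10 s at HZ=100 */

        printf("%d\n", time_after(jiffies, timeout));   /* 0: not yet */
        jiffies += 2000;                                /* wrapped past */
        printf("%d\n", time_after(jiffies, timeout));   /* 1: expired */
        return 0;
    }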
P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s);
- if (s->s_root)
- v9fs_dentry_release(s->s_root); /* clunk root */
+ v9fs_dentry_release(s->s_root); /* clunk root */
kill_anon_super(s);
u32 s_last_bmap;
struct buffer_head *s_bmap_bh;
char *s_prefix; /* Prefix for volumes and assigns. */
+ int s_prefix_len; /* Length of prefix. */
char s_volume[32]; /* Volume prefix for absolute symlinks. */
- spinlock_t symlink_lock; /* protects the previous two */
};
#define SF_INTL 0x0001 /* International filesystem. */
p = (char *)AFFS_HEAD(bh)->table;
lc = '/';
if (*symname == '/') {
- struct affs_sb_info *sbi = AFFS_SB(sb);
while (*symname == '/')
symname++;
- spin_lock(&sbi->symlink_lock);
- while (sbi->s_volume[i]) /* Cannot overflow */
- *p++ = sbi->s_volume[i++];
- spin_unlock(&sbi->symlink_lock);
+ while (AFFS_SB(sb)->s_volume[i]) /* Cannot overflow */
+ *p++ = AFFS_SB(sb)->s_volume[i++];
}
while (i < maxlen && (c = *symname++)) {
if (c == '.' && lc == '/' && *symname == '.' && symname[1] == '/') {
switch (token) {
case Opt_bs:
if (match_int(&args[0], &n))
- return 0;
+ return -EINVAL;
if (n != 512 && n != 1024 && n != 2048
&& n != 4096) {
printk ("AFFS: Invalid blocksize (512, 1024, 2048, 4096 allowed)\n");
break;
case Opt_mode:
if (match_octal(&args[0], &option))
- return 0;
+ return 1;
*mode = option & 0777;
*mount_opts |= SF_SETMODE;
break;
*mount_opts |= SF_MUFS;
break;
case Opt_prefix:
+ /* Free any previous prefix */
+ kfree(*prefix);
*prefix = match_strdup(&args[0]);
if (!*prefix)
return 0;
break;
case Opt_reserved:
if (match_int(&args[0], reserved))
- return 0;
+ return 1;
break;
case Opt_root:
if (match_int(&args[0], root))
- return 0;
+ return 1;
break;
case Opt_setgid:
if (match_int(&args[0], &option))
- return 0;
+ return 1;
*gid = option;
*mount_opts |= SF_SETGID;
break;
case Opt_setuid:
if (match_int(&args[0], &option))
- return 0;
+ return -EINVAL;
*uid = option;
*mount_opts |= SF_SETUID;
break;
return -ENOMEM;
sb->s_fs_info = sbi;
mutex_init(&sbi->s_bmlock);
- spin_lock_init(&sbi->symlink_lock);
if (!parse_options(data,&uid,&gid,&i,&reserved,&root_block,
&blocksize,&sbi->s_prefix,
sbi->s_volume, &mount_flags)) {
printk(KERN_ERR "AFFS: Error parsing options\n");
- kfree(sbi->s_prefix);
- kfree(sbi);
return -EINVAL;
}
/* N.B. after this point s_prefix must be released */
unsigned long mount_flags;
int res = 0;
char *new_opts = kstrdup(data, GFP_KERNEL);
- char volume[32];
- char *prefix = NULL;
pr_debug("AFFS: remount(flags=0x%x,opts=\"%s\")\n",*flags,data);
*flags |= MS_NODIRATIME;
- memcpy(volume, sbi->s_volume, 32);
if (!parse_options(data, &uid, &gid, &mode, &reserved, &root_block,
- &blocksize, &prefix, volume,
+ &blocksize, &sbi->s_prefix, sbi->s_volume,
&mount_flags)) {
- kfree(prefix);
kfree(new_opts);
return -EINVAL;
}
sbi->s_mode = mode;
sbi->s_uid = uid;
sbi->s_gid = gid;
- /* protect against readers */
- spin_lock(&sbi->symlink_lock);
- if (prefix) {
- kfree(sbi->s_prefix);
- sbi->s_prefix = prefix;
- }
- memcpy(sbi->s_volume, volume, 32);
- spin_unlock(&sbi->symlink_lock);
if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
unlock_kernel();
int i, j;
char c;
char lc;
+ char *pf;
pr_debug("AFFS: follow_link(ino=%lu)\n",inode->i_ino);
j = 0;
lf = (struct slink_front *)bh->b_data;
lc = 0;
+ pf = AFFS_SB(inode->i_sb)->s_prefix ? AFFS_SB(inode->i_sb)->s_prefix : "/";
if (strchr(lf->symname,':')) { /* Handle assign or volume name */
- struct affs_sb_info *sbi = AFFS_SB(inode->i_sb);
- char *pf;
- spin_lock(&sbi->symlink_lock);
- pf = sbi->s_prefix ? sbi->s_prefix : "/";
while (i < 1023 && (c = pf[i]))
link[i++] = c;
- spin_unlock(&sbi->symlink_lock);
while (i < 1023 && lf->symname[j] != ':')
link[i++] = lf->symname[j++];
if (i < 1023)
brelse(bh);
unacquire_priv_sbp:
- kfree(befs_sb->mount_opts.iocharset);
kfree(sb->s_fs_info);
unacquire_none:
struct inode *inode;
unsigned i, imap_len;
struct bfs_sb_info *info;
- int ret = -EINVAL;
+ long ret = -EINVAL;
unsigned long i_sblock, i_eblock, i_eoff, s_size;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
- mutex_init(&info->bfs_lock);
s->s_fs_info = info;
sb_set_blocksize(s, BFS_BSIZE);
- info->si_sbh = sb_bread(s, 0);
- if (!info->si_sbh)
+ bh = sb_bread(s, 0);
+ if(!bh)
goto out;
- bfs_sb = (struct bfs_super_block *)info->si_sbh->b_data;
+ bfs_sb = (struct bfs_super_block *)bh->b_data;
if (le32_to_cpu(bfs_sb->s_magic) != BFS_MAGIC) {
if (!silent)
printf("No BFS filesystem on %s (magic=%08x)\n",
s->s_id, le32_to_cpu(bfs_sb->s_magic));
- goto out1;
+ goto out;
}
if (BFS_UNCLEAN(bfs_sb, s) && !silent)
printf("%s is unclean, continuing\n", s->s_id);
s->s_magic = BFS_MAGIC;
+ info->si_sbh = bh;
if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end)) {
printf("Superblock is corrupted\n");
- goto out1;
+ goto out;
}
info->si_lasti = (le32_to_cpu(bfs_sb->s_start) - BFS_BSIZE) /
imap_len = (info->si_lasti / 8) + 1;
info->si_imap = kzalloc(imap_len, GFP_KERNEL);
if (!info->si_imap)
- goto out1;
+ goto out;
for (i = 0; i < BFS_ROOT_INO; i++)
set_bit(i, info->si_imap);
inode = bfs_iget(s, BFS_ROOT_INO);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
- goto out2;
+ kfree(info->si_imap);
+ goto out;
}
s->s_root = d_alloc_root(inode);
if (!s->s_root) {
iput(inode);
ret = -ENOMEM;
- goto out2;
+ kfree(info->si_imap);
+ goto out;
}
info->si_blocks = (le32_to_cpu(bfs_sb->s_end) + 1) >> BFS_BSIZE_BITS;
bh = sb_bread(s, info->si_blocks - 1);
if (!bh) {
printf("Last block not available: %lu\n", info->si_blocks - 1);
+ iput(inode);
ret = -EIO;
- goto out3;
+ kfree(info->si_imap);
+ goto out;
}
brelse(bh);
printf("Inode 0x%08x corrupted\n", i);
brelse(bh);
- ret = -EIO;
- goto out3;
+ s->s_root = NULL;
+ kfree(info->si_imap);
+ kfree(info);
+ s->s_fs_info = NULL;
+ return -EIO;
}
if (!di->i_ino) {
s->s_dirt = 1;
}
dump_imap("read_super", s);
+ mutex_init(&info->bfs_lock);
return 0;
-out3:
- dput(s->s_root);
- s->s_root = NULL;
-out2:
- kfree(info->si_imap);
-out1:
- brelse(info->si_sbh);
out:
- mutex_destroy(&info->bfs_lock);
+ brelse(bh);
kfree(info);
s->s_fs_info = NULL;
return ret;
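[Editor's note: the error paths being collapsed in this fill_super hunk are C's standard goto-unwind ladder: resources acquired in order are released in reverse, one label per acquisition, so every failure point jumps to exactly the cleanup it needs. A minimal standalone sketch of the idiom; names are illustrative:]

    #include <stdlib.h>

    struct thing { int dummy; };

    static int setup(void)
    {
        struct thing *a, *b, *c;
        int ret = -1;

        a = malloc(sizeof(*a));
        if (!a)
            goto out;           /* nothing to undo yet */
        b = malloc(sizeof(*b));
        if (!b)
            goto out_free_a;    /* undo only what succeeded */
        c = malloc(sizeof(*c));
        if (!c)
            goto out_free_b;

        /* ... use a, b, c ... */
        ret = 0;
        free(c);                /* success: fall through the ladder */
    out_free_b:
        free(b);
    out_free_a:
        free(a);
    out:
        return ret;
    }

    int main(void)
    {
        return setup() ? 1 : 0;
    }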
#else
set_personality(PER_LINUX);
#endif
- setup_new_exec(bprm);
current->mm->end_code = ex.a_text +
(current->mm->start_code = N_TXTADDR(ex));
if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
goto out_free_interp;
+ /*
+ * The early SET_PERSONALITY here is so that the lookup
+ * for the interpreter happens in the namespace of the
+ * to-be-execed image. SET_PERSONALITY can select an
+ * alternate root.
+ *
+ * However, SET_PERSONALITY is NOT allowed to switch
+	 * this task into the new image's memory mapping
+ * policy - that is, TASK_SIZE must still evaluate to
+ * that which is appropriate to the execing application.
+ * This is because exit_mmap() needs to have TASK_SIZE
+ * evaluate to the size of the old image.
+ *
+ * So if (say) a 64-bit application is execing a 32-bit
+ * application it is the architecture's responsibility
+ * to defer changing the value of TASK_SIZE until the
+ * switch really is going to happen - do this in
+ * flush_thread(). - akpm
+ */
+ SET_PERSONALITY(loc->elf_ex);
+
interpreter = open_exec(elf_interpreter);
retval = PTR_ERR(interpreter);
if (IS_ERR(interpreter))
/* Verify the interpreter has a valid arch */
if (!elf_check_arch(&loc->interp_elf_ex))
goto out_free_dentry;
+ } else {
+ /* Executables without an interpreter also need a personality */
+ SET_PERSONALITY(loc->elf_ex);
}
/* Flush all traces of the currently running executable */
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
current->flags |= PF_RANDOMIZE;
-
- setup_new_exec(bprm);
+ arch_pick_mmap_layout(current->mm);
/* Do this so that we can load the interpreter, if need be. We will
change some of these later */
unsigned long stack_size, entryaddr;
#ifdef ELF_FDPIC_PLAT_INIT
unsigned long dynaddr;
-#endif
-#ifndef CONFIG_MMU
- unsigned long stack_prot;
#endif
struct file *interpreter = NULL; /* to shut gcc up */
char *interpreter_name = NULL;
* defunct, deceased, etc. after this point we have to exit via
* error_kill */
set_personality(PER_LINUX_FDPIC);
- if (elf_read_implies_exec(&exec_params.hdr, executable_stack))
- current->personality |= READ_IMPLIES_EXEC;
-
- setup_new_exec(bprm);
-
set_binfmt(&elf_fdpic_format);
current->mm->start_code = 0;
if (stack_size < PAGE_SIZE * 2)
stack_size = PAGE_SIZE * 2;
- stack_prot = PROT_READ | PROT_WRITE;
- if (executable_stack == EXSTACK_ENABLE_X ||
- (executable_stack == EXSTACK_DEFAULT && VM_STACK_FLAGS & VM_EXEC))
- stack_prot |= PROT_EXEC;
-
down_write(¤t->mm->mmap_sem);
- current->mm->start_brk = do_mmap(NULL, 0, stack_size, stack_prot,
+ current->mm->start_brk = do_mmap(NULL, 0, stack_size,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_GROWSDOWN,
0);
/* OK, This is the point of no return */
set_personality(PER_LINUX_32BIT);
- setup_new_exec(bprm);
}
/*
/* OK, This is the point of no return */
current->flags &= ~PF_FORKNOEXEC;
current->personality = PER_HPUX;
- setup_new_exec(bprm);
/* Set the task size for HP-UX processes such that
* the gateway page is outside the address space.
static inline int use_bip_pool(unsigned int idx)
{
- if (idx == BIOVEC_MAX_IDX)
+ if (idx == BIOVEC_NR_POOLS)
return 1;
return 0;
/* Use mempool if lower order alloc failed or max vecs were requested */
if (bip == NULL) {
- idx = BIOVEC_MAX_IDX; /* so we free the payload properly later */
bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);
if (unlikely(bip == NULL)) {
if (page == prev->bv_page &&
offset == prev->bv_offset + prev->bv_len) {
- unsigned int prev_bv_len = prev->bv_len;
prev->bv_len += len;
if (q->merge_bvec_fn) {
struct bvec_merge_data bvm = {
- /* prev_bvec is already charged in
- bi_size, discharge it in order to
- simulate merging updated prev_bvec
- as new bvec. */
.bi_bdev = bio->bi_bdev,
.bi_sector = bio->bi_sector,
- .bi_size = bio->bi_size - prev_bv_len,
+ .bi_size = bio->bi_size,
.bi_rw = bio->bi_rw,
};
if (!sb)
goto out;
if (sb->s_flags & MS_RDONLY) {
- sb->s_frozen = SB_FREEZE_TRANS;
- up_write(&sb->s_umount);
+ deactivate_locked_super(sb);
mutex_unlock(&bdev->bd_fsfreeze_mutex);
return sb;
}
BUG_ON(sb->s_bdev != bdev);
down_write(&sb->s_umount);
if (sb->s_flags & MS_RDONLY)
- goto out_unfrozen;
+ goto out_deactivate;
if (sb->s_op->unfreeze_fs) {
error = sb->s_op->unfreeze_fs(sb);
}
}
-out_unfrozen:
sb->s_frozen = SB_UNFROZEN;
smp_wmb();
wake_up(&sb->s_wait_unfrozen);
+out_deactivate:
if (sb)
deactivate_locked_super(sb);
out_unlock:
cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
char *mount_data_global, const char *devname)
{
- int rc;
+ int rc = 0;
int xid;
struct smb_vol *volume_info;
- struct cifsSesInfo *pSesInfo;
- struct cifsTconInfo *tcon;
- struct TCP_Server_Info *srvTcp;
+ struct cifsSesInfo *pSesInfo = NULL;
+ struct cifsTconInfo *tcon = NULL;
+ struct TCP_Server_Info *srvTcp = NULL;
char *full_path;
char *mount_data = mount_data_global;
#ifdef CONFIG_CIFS_DFS_UPCALL
int referral_walks_count = 0;
try_mount_again:
#endif
- rc = 0;
- tcon = NULL;
- pSesInfo = NULL;
- srvTcp = NULL;
full_path = NULL;
xid = GetXid();
cleanup_volume_info(&volume_info);
referral_walks_count++;
- FreeXid(xid);
goto try_mount_again;
}
#else /* No DFS support, return error on mount */
min(len, max_len), nlt,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- pqst->len -= nls_nullsize(nlt);
} else {
pqst->name = filename;
pqst->len = len;
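
Note the pattern the cifs hunk touches: with a try_mount_again label, every per-attempt value (rc, the session/tcon/server pointers, the xid) must be reset each time control re-enters the path, or the second pass reuses state the failed attempt left behind. A small compilable sketch of the safe shape, with invented names:

#include <stdio.h>

static int attempt(int try_nr)
{
	return try_nr == 0 ? -1 : 0;	/* first pass fails, second succeeds */
}

int main(void)
{
	void *session;
	int tries = 0;
	int rc;

retry:
	/* re-initialise everything a failed attempt may have dirtied */
	rc = 0;
	session = NULL;

	rc = attempt(tries);
	if (rc && tries++ < 1)
		goto retry;

	printf("rc=%d session=%p\n", rc, session);
	return rc ? 1 : 0;
}
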
static int debugfs_mount_count;
static bool debugfs_registered;
-static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t dev,
- void *data, const struct file_operations *fops)
-
+static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t dev)
{
struct inode *inode = new_inode(sb);
init_special_inode(inode, mode, dev);
break;
case S_IFREG:
- inode->i_fop = fops ? fops : &debugfs_file_operations;
- inode->i_private = data;
+ inode->i_fop = &debugfs_file_operations;
break;
case S_IFLNK:
inode->i_op = &debugfs_link_operations;
- inode->i_fop = fops;
- inode->i_private = data;
break;
case S_IFDIR:
inode->i_op = &simple_dir_inode_operations;
- inode->i_fop = fops ? fops : &simple_dir_operations;
- inode->i_private = data;
+ inode->i_fop = &simple_dir_operations;
/* directory inodes start off with i_nlink == 2
* (for "." entry) */
/* SMP-safe */
static int debugfs_mknod(struct inode *dir, struct dentry *dentry,
- int mode, dev_t dev, void *data,
- const struct file_operations *fops)
+ int mode, dev_t dev)
{
struct inode *inode;
int error = -EPERM;
if (dentry->d_inode)
return -EEXIST;
- inode = debugfs_get_inode(dir->i_sb, mode, dev, data, fops);
+ inode = debugfs_get_inode(dir->i_sb, mode, dev);
if (inode) {
d_instantiate(dentry, inode);
dget(dentry);
return error;
}
-static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, int mode,
- void *data, const struct file_operations *fops)
+static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
int res;
mode = (mode & (S_IRWXUGO | S_ISVTX)) | S_IFDIR;
- res = debugfs_mknod(dir, dentry, mode, 0, data, fops);
+ res = debugfs_mknod(dir, dentry, mode, 0);
if (!res) {
inc_nlink(dir);
fsnotify_mkdir(dir, dentry);
return res;
}
-static int debugfs_link(struct inode *dir, struct dentry *dentry, int mode,
- void *data, const struct file_operations *fops)
+static int debugfs_link(struct inode *dir, struct dentry *dentry, int mode)
{
mode = (mode & S_IALLUGO) | S_IFLNK;
- return debugfs_mknod(dir, dentry, mode, 0, data, fops);
+ return debugfs_mknod(dir, dentry, mode, 0);
}
-static int debugfs_create(struct inode *dir, struct dentry *dentry, int mode,
- void *data, const struct file_operations *fops)
+static int debugfs_create(struct inode *dir, struct dentry *dentry, int mode)
{
int res;
mode = (mode & S_IALLUGO) | S_IFREG;
- res = debugfs_mknod(dir, dentry, mode, 0, data, fops);
+ res = debugfs_mknod(dir, dentry, mode, 0);
if (!res)
fsnotify_create(dir, dentry);
return res;
static int debugfs_create_by_name(const char *name, mode_t mode,
struct dentry *parent,
- struct dentry **dentry,
- void *data,
- const struct file_operations *fops)
+ struct dentry **dentry)
{
int error = 0;
if (!IS_ERR(*dentry)) {
switch (mode & S_IFMT) {
case S_IFDIR:
- error = debugfs_mkdir(parent->d_inode, *dentry, mode,
- data, fops);
+ error = debugfs_mkdir(parent->d_inode, *dentry, mode);
break;
case S_IFLNK:
- error = debugfs_link(parent->d_inode, *dentry, mode,
- data, fops);
+ error = debugfs_link(parent->d_inode, *dentry, mode);
break;
default:
- error = debugfs_create(parent->d_inode, *dentry, mode,
- data, fops);
+ error = debugfs_create(parent->d_inode, *dentry, mode);
break;
}
dput(*dentry);
if (error)
goto exit;
- error = debugfs_create_by_name(name, mode, parent, &dentry,
- data, fops);
+ error = debugfs_create_by_name(name, mode, parent, &dentry);
if (error) {
dentry = NULL;
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
goto exit;
}
+
+ if (dentry->d_inode) {
+ if (data)
+ dentry->d_inode->i_private = data;
+ if (fops)
+ dentry->d_inode->i_fop = fops;
+ }
exit:
return dentry;
}
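
An aside for orientation: debugfs_create_by_name() sits beneath the exported debugfs_create_file() helper, and the i_private/i_fop wiring shown above is what ultimately hands a driver its data pointer and file_operations. A conventional driver-side sketch (the mydrv names are invented; the debugfs and seq_file calls are the stock kernel APIs):

#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/module.h>

static unsigned long mydrv_hits;

static int mydrv_show(struct seq_file *m, void *unused)
{
	/* m->private is the void *data given to debugfs_create_file() */
	seq_printf(m, "hits=%lu\n", *(unsigned long *)m->private);
	return 0;
}

static int mydrv_open(struct inode *inode, struct file *file)
{
	return single_open(file, mydrv_show, inode->i_private);
}

static const struct file_operations mydrv_fops = {
	.owner   = THIS_MODULE,
	.open    = mydrv_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/* in the driver's init path:
 * debugfs_create_file("hits", 0444, NULL, &mydrv_hits, &mydrv_fops);
 */
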
struct tty_struct *devpts_get_tty(struct inode *pts_inode, int number)
{
- struct dentry *dentry;
- struct tty_struct *tty;
-
BUG_ON(pts_inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR));
- /* Ensure dentry has not been deleted by devpts_pty_kill() */
- dentry = d_find_alias(pts_inode);
- if (!dentry)
- return NULL;
-
- tty = NULL;
if (pts_inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC)
- tty = (struct tty_struct *)pts_inode->i_private;
-
- dput(dentry);
-
- return tty;
+ return (struct tty_struct *)pts_inode->i_private;
+ return NULL;
}
void devpts_pty_kill(struct tty_struct *tty)
char *cipher_name, size_t *key_size)
{
char dummy_key[ECRYPTFS_MAX_KEY_BYTES];
- char *full_alg_name = NULL;
+ char *full_alg_name;
int rc;
*key_tfm = NULL;
if (rc)
goto out;
*key_tfm = crypto_alloc_blkcipher(full_alg_name, 0, CRYPTO_ALG_ASYNC);
+ kfree(full_alg_name);
if (IS_ERR(*key_tfm)) {
rc = PTR_ERR(*key_tfm);
printk(KERN_ERR "Unable to allocate crypto cipher with name "
goto out;
}
out:
- kfree(full_alg_name);
return rc;
}
| ECRYPTFS_ENCRYPTED);
}
mutex_unlock(&crypt_stat->cs_mutex);
+ if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY)
+ && !(file->f_flags & O_RDONLY)) {
+ rc = -EPERM;
+ printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs "
+ "file must hence be opened RO\n", __func__);
+ goto out;
+ }
if (!ecryptfs_inode_to_private(inode)->lower_file) {
rc = ecryptfs_init_persistent_file(ecryptfs_dentry);
if (rc) {
goto out;
}
}
- if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY)
- && !(file->f_flags & O_RDONLY)) {
- rc = -EPERM;
- printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs "
- "file must hence be opened RO\n", __func__);
- goto out;
- }
ecryptfs_set_file_lower(
file, ecryptfs_inode_to_private(inode)->lower_file);
if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) {
return rc;
}
-int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
- struct kstat *stat)
-{
- struct kstat lower_stat;
- int rc;
-
- rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry),
- ecryptfs_dentry_to_lower(dentry), &lower_stat);
- if (!rc) {
- generic_fillattr(dentry->d_inode, stat);
- stat->blocks = lower_stat.blocks;
- }
- return rc;
-}
-
int
ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
size_t size, int flags)
const struct inode_operations ecryptfs_main_iops = {
.permission = ecryptfs_permission,
.setattr = ecryptfs_setattr,
- .getattr = ecryptfs_getattr,
.setxattr = ecryptfs_setxattr,
.getxattr = ecryptfs_getxattr,
.listxattr = ecryptfs_listxattr,
struct vm_area_struct *prev = NULL;
unsigned long vm_flags;
unsigned long stack_base;
- unsigned long stack_size;
- unsigned long stack_expand;
- unsigned long rlim_stack;
#ifdef CONFIG_STACK_GROWSUP
/* Limit stack size to 1GB */
goto out_unlock;
}
- stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
- stack_size = vma->vm_end - vma->vm_start;
- /*
- * Align this down to a page boundary as expand_stack
- * will align it up.
- */
- rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
- rlim_stack = min(rlim_stack, stack_size);
#ifdef CONFIG_STACK_GROWSUP
- if (stack_size + stack_expand > rlim_stack)
- stack_base = vma->vm_start + rlim_stack;
- else
- stack_base = vma->vm_end + stack_expand;
+ stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE;
#else
- if (stack_size + stack_expand > rlim_stack)
- stack_base = vma->vm_end - rlim_stack;
- else
- stack_base = vma->vm_start - stack_expand;
+ stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE;
#endif
ret = expand_stack(vma, stack_base);
if (ret)
int flush_old_exec(struct linux_binprm * bprm)
{
- int retval;
+ char * name;
+ int i, ch, retval;
+ char tcomm[sizeof(current->comm)];
/*
* Make sure we have a private signal table and that
bprm->mm = NULL; /* We're using it now */
- current->flags &= ~PF_RANDOMIZE;
- flush_thread();
- current->personality &= ~bprm->per_clear;
-
- return 0;
-
-out:
- return retval;
-}
-EXPORT_SYMBOL(flush_old_exec);
-
-void setup_new_exec(struct linux_binprm * bprm)
-{
- int i, ch;
- char * name;
- char tcomm[sizeof(current->comm)];
-
- arch_pick_mmap_layout(current->mm);
-
/* This is the point of no return */
current->sas_ss_sp = current->sas_ss_size = 0;
tcomm[i] = '\0';
set_task_comm(current, tcomm);
+ current->flags &= ~PF_RANDOMIZE;
+ flush_thread();
+
/* Set the new mm task size. We have to do that late because it may
* depend on TIF_32BIT which is only updated in flush_thread() on
* some architectures like powerpc
set_dumpable(current->mm, suid_dumpable);
}
+ current->personality &= ~bprm->per_clear;
+
/*
* Flush performance counters when crossing a
* security domain:
flush_signal_handlers(current, 0);
flush_old_files(current->files);
+
+ return 0;
+
+out:
+ return retval;
}
-EXPORT_SYMBOL(setup_new_exec);
+
+EXPORT_SYMBOL(flush_old_exec);
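
For reference, the split this hunk reverts gave every binfmt loader a two-phase shape: flush_old_exec() tears down the old mm and can still fail cleanly, while setup_new_exec() commits the new image's identity past the point of no return. A loose sketch of the caller ordering under that split (not the exact kernel code):

static int load_binary_sketch(struct linux_binprm *bprm)
{
	int retval;

	retval = flush_old_exec(bprm);	/* old image gone; may still fail */
	if (retval)
		return retval;

	setup_new_exec(bprm);		/* point of no return: comm,
					 * personality, mmap layout */

	/* ... map segments, build the stack, start_thread() ... */
	return 0;
}
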
/*
* Prepare credentials and lock ->cred_guard_mutex.
fsdata);
}
-static int exofs_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
-{
- struct inode *inode = mapping->host;
- /* According to comment in simple_write_end i_mutex is held */
- loff_t i_size = inode->i_size;
- int ret;
-
- ret = simple_write_end(file, mapping,pos, len, copied, page, fsdata);
- if (i_size != inode->i_size)
- mark_inode_dirty(inode);
- return ret;
-}
-
const struct address_space_operations exofs_aops = {
.readpage = exofs_readpage,
.readpages = exofs_readpages,
.writepage = exofs_writepage,
.writepages = exofs_writepages,
.write_begin = exofs_write_begin_export,
- .write_end = exofs_write_end,
+ .write_end = simple_write_end,
};
/******************************************************************************
return ext3_journal_get_write_access(handle, bh);
}
-/*
- * Truncate blocks that were not used by write. We have to truncate the
- * pagecache as well so that corresponding buffers get properly unmapped.
- */
-static void ext3_truncate_failed_write(struct inode *inode)
-{
- truncate_inode_pages(inode->i_mapping, inode->i_size);
- ext3_truncate(inode);
-}
-
static int ext3_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
unlock_page(page);
page_cache_release(page);
if (pos + len > inode->i_size)
- ext3_truncate_failed_write(inode);
+ ext3_truncate(inode);
}
if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
goto retry;
page_cache_release(page);
if (pos + len > inode->i_size)
- ext3_truncate_failed_write(inode);
+ ext3_truncate(inode);
return ret ? ret : copied;
}
page_cache_release(page);
if (pos + len > inode->i_size)
- ext3_truncate_failed_write(inode);
+ ext3_truncate(inode);
return ret ? ret : copied;
}
page_cache_release(page);
if (pos + len > inode->i_size)
- ext3_truncate_failed_write(inode);
+ ext3_truncate(inode);
return ret ? ret : copied;
}
static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
ext4_group_t group)
{
- if (!ext4_bg_has_super(sb, group))
- return 0;
-
- if (EXT4_HAS_INCOMPAT_FEATURE(sb,EXT4_FEATURE_INCOMPAT_META_BG))
- return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
- else
- return EXT4_SB(sb)->s_gdb_count;
+ return ext4_bg_has_super(sb, group) ? EXT4_SB(sb)->s_gdb_count : 0;
}
/**
if (ext4_bg_has_super(sb, i) &&
((i < 5) || ((i % flex_size) == 0)))
add_system_zone(sbi, ext4_group_first_block_no(sb, i),
- ext4_bg_num_gdb(sb, i) + 1);
+ sbi->s_gdb_count + 1);
gdp = ext4_get_group_desc(sb, i, NULL);
ret = add_system_zone(sbi, ext4_block_bitmap(sb, gdp), 1);
if (ret)
__u16 i_extra_isize;
spinlock_t i_block_reservation_lock;
-#ifdef CONFIG_QUOTA
- /* quota space reservation, managed internally by quota code */
- qsize_t i_reserved_quota;
-#endif
/* completed async DIOs that might need unwritten extents handling */
struct list_head i_aio_dio_complete_list;
/* current io_end structure for async DIO write*/
ext4_io_end_t *cur_aio_dio;
-
- /*
- * Transactions that contain inode's metadata needed to complete
- * fsync and fdatasync, respectively.
- */
- tid_t i_sync_tid;
- tid_t i_datasync_tid;
};
/*
#define EXT4_MOUNT_DELALLOC 0x8000000 /* Delalloc support */
#define EXT4_MOUNT_DATA_ERR_ABORT 0x10000000 /* Abort on file data write */
#define EXT4_MOUNT_BLOCK_VALIDITY 0x20000000 /* Block validity checking */
-#define EXT4_MOUNT_DISCARD 0x40000000 /* Issue DISCARD requests */
#define clear_opt(o, opt) o &= ~EXT4_MOUNT_##opt
#define set_opt(o, opt) o |= EXT4_MOUNT_##opt
extern int ext4_block_truncate_page(handle_t *handle,
struct address_space *mapping, loff_t from);
extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
-extern qsize_t *ext4_get_reserved_space(struct inode *inode);
+extern qsize_t ext4_get_reserved_space(struct inode *inode);
extern int flush_aio_dio_completed_IO(struct inode *inode);
/* ioctl.c */
extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
#define EXT4_DATA_TRANS_BLOCKS(sb) (EXT4_SINGLEDATA_TRANS_BLOCKS(sb) + \
EXT4_XATTR_TRANS_BLOCKS - 2 + \
- EXT4_MAXQUOTAS_TRANS_BLOCKS(sb))
+ 2*EXT4_QUOTA_TRANS_BLOCKS(sb))
/*
* Define the number of metadata blocks we need to account to modify data.
* This include super block, inode block, quota blocks and xattr blocks
*/
#define EXT4_META_TRANS_BLOCKS(sb) (EXT4_XATTR_TRANS_BLOCKS + \
- EXT4_MAXQUOTAS_TRANS_BLOCKS(sb))
+ 2*EXT4_QUOTA_TRANS_BLOCKS(sb))
/* Delete operations potentially hit one directory's namespace plus an
* entire inode, plus arbitrary amounts of bitmap/indirection data. Be
* but inode, sb and group updates are done only once */
#define EXT4_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
(EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)+3+DQUOT_INIT_REWRITE) : 0)
-
#define EXT4_QUOTA_DEL_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_DEL_ALLOC*\
(EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)+3+DQUOT_DEL_REWRITE) : 0)
#else
#define EXT4_QUOTA_INIT_BLOCKS(sb) 0
#define EXT4_QUOTA_DEL_BLOCKS(sb) 0
#endif
-#define EXT4_MAXQUOTAS_TRANS_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_TRANS_BLOCKS(sb))
-#define EXT4_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_INIT_BLOCKS(sb))
-#define EXT4_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_DEL_BLOCKS(sb))
int
ext4_mark_iloc_dirty(handle_t *handle,
return 0;
}
-static inline void ext4_update_inode_fsync_trans(handle_t *handle,
- struct inode *inode,
- int datasync)
-{
- struct ext4_inode_info *ei = EXT4_I(inode);
-
- if (ext4_handle_valid(handle)) {
- ei->i_sync_tid = handle->h_transaction->t_tid;
- if (datasync)
- ei->i_datasync_tid = handle->h_transaction->t_tid;
- }
-}
-
/* super.c */
int ext4_force_commit(struct super_block *sb);
while (block < last && block != EXT_MAX_BLOCK) {
num = last - block;
/* find extent for this block */
- down_read(&EXT4_I(inode)->i_data_sem);
path = ext4_ext_find_extent(inode, block, path);
- up_read(&EXT4_I(inode)->i_data_sem);
if (IS_ERR(path)) {
err = PTR_ERR(path);
path = NULL;
ext_debug("free last %u blocks starting %llu\n", num, start);
for (i = 0; i < num; i++) {
bh = sb_find_get_block(inode->i_sb, start + i);
- ext4_forget(handle, metadata, inode, bh, start + i);
+ ext4_forget(handle, 0, inode, bh, start + i);
}
ext4_free_blocks(handle, inode, start, num, metadata);
} else if (from == le32_to_cpu(ex->ee_block)
correct_index = 1;
credits += (ext_depth(inode)) + 1;
}
- credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
+ credits += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
err = ext4_ext_truncate_extend_restart(handle, inode, credits);
if (err)
if (flags == EXT4_GET_BLOCKS_DIO_CONVERT_EXT) {
ret = ext4_convert_unwritten_extents_dio(handle, inode,
path);
- if (ret >= 0)
- ext4_update_inode_fsync_trans(handle, inode, 1);
goto out2;
}
/* buffered IO case */
ret = ext4_ext_convert_to_initialized(handle, inode,
path, iblock,
max_blocks);
- if (ret >= 0)
- ext4_update_inode_fsync_trans(handle, inode, 1);
out:
if (ret <= 0) {
err = ret;
allocated = ext4_ext_get_actual_len(&newex);
set_buffer_new(bh_result);
- /*
- * Cache the extent and update transaction to commit on fdatasync only
- * when it is _not_ an uninitialized extent.
- */
- if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
+ /* Cache only when it is _not_ an uninitialized extent */
+ if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0)
ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
EXT4_EXT_CACHE_EXTENT);
- ext4_update_inode_fsync_trans(handle, inode, 1);
- } else
- ext4_update_inode_fsync_trans(handle, inode, 0);
out:
if (allocated > max_blocks)
allocated = max_blocks;
* Walk the extent tree gathering extent information.
* ext4_ext_fiemap_cb will push extents back to user.
*/
+ down_read(&EXT4_I(inode)->i_data_sem);
error = ext4_ext_walk_space(inode, start_blk, len_blks,
ext4_ext_fiemap_cb, fieinfo);
+ up_read(&EXT4_I(inode)->i_data_sem);
}
return error;
int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync)
{
struct inode *inode = dentry->d_inode;
- struct ext4_inode_info *ei = EXT4_I(inode);
journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
- int ret;
- tid_t commit_tid;
+ int err, ret = 0;
J_ASSERT(ext4_journal_current_handle() == NULL);
trace_ext4_sync_file(file, dentry, datasync);
- if (inode->i_sb->s_flags & MS_RDONLY)
- return 0;
-
ret = flush_aio_dio_completed_IO(inode);
if (ret < 0)
- return ret;
-
- if (!journal)
- return simple_fsync(file, dentry, datasync);
-
+ goto out;
/*
- * data=writeback,ordered:
+ * data=writeback:
* The caller's filemap_fdatawrite()/wait will sync the data.
- * Metadata is in the journal, we wait for proper transaction to
- * commit here.
+ * sync_inode() will sync the metadata
+ *
+ * data=ordered:
+ * The caller's filemap_fdatawrite() will write the data and
+ * sync_inode() will write the inode if it is dirty. Then the caller's
+ * filemap_fdatawait() will wait on the pages.
*
* data=journal:
* filemap_fdatawrite won't do anything (the buffers are clean).
* (they were dirtied by commit). But that's OK - the blocks are
* safe in-journal, which is all fsync() needs to ensure.
*/
- if (ext4_should_journal_data(inode))
- return ext4_force_commit(inode->i_sb);
+ if (ext4_should_journal_data(inode)) {
+ ret = ext4_force_commit(inode->i_sb);
+ goto out;
+ }
- commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid;
- if (jbd2_log_start_commit(journal, commit_tid))
- jbd2_log_wait_commit(journal, commit_tid);
- else if (journal->j_flags & JBD2_BARRIER)
+ if (!journal)
+ ret = sync_mapping_buffers(inode->i_mapping);
+
+ if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
+ goto out;
+
+ /*
+ * The VFS has written the file data. If the inode is unaltered
+ * then we need not start a commit.
+ */
+ if (inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC)) {
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = 0, /* sys_fsync did this */
+ };
+ err = sync_inode(inode, &wbc);
+ if (ret == 0)
+ ret = err;
+ }
+out:
+ if (journal && (journal->j_flags & JBD2_BARRIER))
blkdev_issue_flush(inode->i_sb->s_bdev, NULL);
return ret;
}
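
The data-mode comment above is about the filesystem's side of a durability request; on the calling side the distinction it draws maps onto fsync(2) versus fdatasync(2), where the latter may skip committing metadata that isn't needed to read the data back (timestamps, for instance). A minimal user-space sketch:

#include <unistd.h>

int write_durably(int fd, const void *buf, size_t len)
{
	if (write(fd, buf, len) != (ssize_t)len)
		return -1;
	/* fdatasync() suffices here: we need the data and any
	 * size-changing metadata, not the atime/mtime updates */
	return fdatasync(fd);
}
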
if (!err)
err = ext4_splice_branch(handle, inode, iblock,
partial, indirect_blks, count);
- if (err)
+ else
goto cleanup;
set_buffer_new(bh_result);
-
- ext4_update_inode_fsync_trans(handle, inode, 1);
got_it:
map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
if (count > blocks_to_boundary)
return err;
}
-#ifdef CONFIG_QUOTA
-qsize_t *ext4_get_reserved_space(struct inode *inode)
+qsize_t ext4_get_reserved_space(struct inode *inode)
{
- return &EXT4_I(inode)->i_reserved_quota;
+ unsigned long long total;
+
+ spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
+ total = EXT4_I(inode)->i_reserved_data_blocks +
+ EXT4_I(inode)->i_reserved_meta_blocks;
+ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+
+ return total;
}
-#endif
/*
* Calculate the number of metadata blocks need to reserve
* to allocate @blocks for non extent file based file
return ext4_journal_get_write_access(handle, bh);
}
-/*
- * Truncate blocks that were not used by write. We have to truncate the
- * pagecache as well so that corresponding buffers get properly unmapped.
- */
-static void ext4_truncate_failed_write(struct inode *inode)
-{
- truncate_inode_pages(inode->i_mapping, inode->i_size);
- ext4_truncate(inode);
-}
-
static int ext4_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
ext4_journal_stop(handle);
if (pos + len > inode->i_size) {
- ext4_truncate_failed_write(inode);
+ ext4_truncate(inode);
/*
* If truncate failed early the inode might
* still be on the orphan list; we need to
ret = ret2;
if (pos + len > inode->i_size) {
- ext4_truncate_failed_write(inode);
+ ext4_truncate(inode);
/*
* If truncate failed early the inode might still be
* on the orphan list; we need to make sure the inode
ret = ret2;
if (pos + len > inode->i_size) {
- ext4_truncate_failed_write(inode);
+ ext4_truncate(inode);
/*
* If truncate failed early the inode might still be
* on the orphan list; we need to make sure the inode
if (!ret)
ret = ret2;
if (pos + len > inode->i_size) {
- ext4_truncate_failed_write(inode);
+ ext4_truncate(inode);
/*
* If truncate failed early the inode might still be
* on the orphan list; we need to make sure the inode
md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
total = md_needed + nrblocks;
- spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
/*
* Make quota reservation here to prevent quota overflow
* later. Real quota accounting is done at pages writeout
* time.
*/
- if (vfs_dq_reserve_block(inode, total))
+ if (vfs_dq_reserve_block(inode, total)) {
+ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
return -EDQUOT;
+ }
if (ext4_claim_free_blocks(sbi, total)) {
+ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
vfs_dq_release_reservation_block(inode, total);
if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
yield();
}
return -ENOSPC;
}
- spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
- EXT4_I(inode)->i_reserved_meta_blocks += md_needed;
- spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+ EXT4_I(inode)->i_reserved_meta_blocks = mdblocks;
+ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
return 0; /* success */
}
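
The reservation hunk above widens the i_block_reservation_lock section so that the free-space check and the counter updates happen as one step; done separately, two writers can both pass the check and oversubscribe the counters. A user-space sketch of that check-then-update pattern, with a mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static long free_blocks = 100, reserved;

static int reserve(long n)
{
	pthread_mutex_lock(&lock);
	if (free_blocks < n) {
		pthread_mutex_unlock(&lock);
		return -1;		/* think -ENOSPC */
	}
	/* still under the lock: nobody can claim these blocks between
	 * the check above and the update below */
	free_blocks -= n;
	reserved += n;
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	int rc = reserve(40);

	printf("rc=%d free=%ld reserved=%ld\n", rc, free_blocks, reserved);
	return 0;
}
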
* number of contiguous blocks. So we will limit
* the number of contiguous blocks to a sane value
*/
- if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) &&
+ if (!(inode->i_flags & EXT4_EXTENTS_FL) &&
(max_blocks > EXT4_MAX_TRANS_DATA))
max_blocks = EXT4_MAX_TRANS_DATA;
* i_size_read because we hold i_mutex.
*/
if (pos + len > inode->i_size)
- ext4_truncate_failed_write(inode);
+ ext4_truncate(inode);
}
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
__le32 *last)
{
__le32 *p;
- int is_metadata = S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode);
-
if (try_to_extend_transaction(handle, inode)) {
if (bh) {
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
*p = 0;
tbh = sb_find_get_block(inode->i_sb, nr);
- ext4_forget(handle, is_metadata, inode, tbh, nr);
+ ext4_forget(handle, 0, inode, tbh, nr);
}
}
- ext4_free_blocks(handle, inode, block_to_free, count, is_metadata);
+ ext4_free_blocks(handle, inode, block_to_free, count, 0);
}
/**
struct ext4_iloc iloc;
struct ext4_inode *raw_inode;
struct ext4_inode_info *ei;
+ struct buffer_head *bh;
struct inode *inode;
- journal_t *journal = EXT4_SB(sb)->s_journal;
long ret;
int block;
return inode;
ei = EXT4_I(inode);
- iloc.bh = 0;
ret = __ext4_get_inode_loc(inode, &iloc, 0);
if (ret < 0)
goto bad_inode;
+ bh = iloc.bh;
raw_inode = ext4_raw_inode(&iloc);
inode->i_mode = le16_to_cpu(raw_inode->i_mode);
inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
if (inode->i_mode == 0 ||
!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
/* this inode is deleted */
+ brelse(bh);
ret = -ESTALE;
goto bad_inode;
}
((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
inode->i_size = ext4_isize(raw_inode);
ei->i_disksize = inode->i_size;
-#ifdef CONFIG_QUOTA
- ei->i_reserved_quota = 0;
-#endif
inode->i_generation = le32_to_cpu(raw_inode->i_generation);
ei->i_block_group = iloc.block_group;
ei->i_last_alloc_group = ~0;
ei->i_data[block] = raw_inode->i_block[block];
INIT_LIST_HEAD(&ei->i_orphan);
- /*
- * Set transaction id's of transactions that have to be committed
- * to finish f[data]sync. We set them to currently running transaction
- * as we cannot be sure that the inode or some of its metadata isn't
- * part of the transaction - the inode could have been reclaimed and
- * now it is reread from disk.
- */
- if (journal) {
- transaction_t *transaction;
- tid_t tid;
-
- spin_lock(&journal->j_state_lock);
- if (journal->j_running_transaction)
- transaction = journal->j_running_transaction;
- else
- transaction = journal->j_committing_transaction;
- if (transaction)
- tid = transaction->t_tid;
- else
- tid = journal->j_commit_sequence;
- spin_unlock(&journal->j_state_lock);
- ei->i_sync_tid = tid;
- ei->i_datasync_tid = tid;
- }
-
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
EXT4_INODE_SIZE(inode->i_sb)) {
+ brelse(bh);
ret = -EIO;
goto bad_inode;
}
ret = 0;
if (ei->i_file_acl &&
- !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
+ ((ei->i_file_acl <
+ (le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) +
+ EXT4_SB(sb)->s_gdb_count)) ||
+ (ei->i_file_acl >= ext4_blocks_count(EXT4_SB(sb)->s_es)))) {
ext4_error(sb, __func__,
"bad extended attribute block %llu in inode #%lu",
ei->i_file_acl, inode->i_ino);
/* Validate block references which are part of inode */
ret = ext4_check_inode_blockref(inode);
}
- if (ret)
+ if (ret) {
+ brelse(bh);
goto bad_inode;
+ }
if (S_ISREG(inode->i_mode)) {
inode->i_op = &ext4_file_inode_operations;
init_special_inode(inode, inode->i_mode,
new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
} else {
+ brelse(bh);
ret = -EIO;
ext4_error(inode->i_sb, __func__,
"bogus i_mode (%o) for inode=%lu",
return inode;
bad_inode:
- brelse(iloc.bh);
iget_failed(inode);
return ERR_PTR(ret);
}
err = rc;
ei->i_state &= ~EXT4_STATE_NEW;
- ext4_update_inode_fsync_trans(handle, inode, 0);
out_brelse:
brelse(bh);
ext4_std_error(inode->i_sb, err);
/* (user+group)*(old+new) structure, inode write (sb,
* inode block, ? - but truncate inode update has it) */
- handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
- EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3);
+ handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+
+ EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
if (IS_ERR(handle)) {
error = PTR_ERR(handle);
goto err_out;
struct file *donor_filp;
int err;
- if (!(filp->f_mode & FMODE_READ) ||
- !(filp->f_mode & FMODE_WRITE))
- return -EBADF;
-
if (copy_from_user(&me,
(struct move_extent __user *)arg, sizeof(me)))
return -EFAULT;
- me.moved_len = 0;
donor_filp = fget(me.donor_fd);
if (!donor_filp)
return -EBADF;
- if (!(donor_filp->f_mode & FMODE_WRITE)) {
- err = -EBADF;
- goto mext_out;
+ if (!capable(CAP_DAC_OVERRIDE)) {
+ if ((current->real_cred->fsuid != inode->i_uid) ||
+ !(inode->i_mode & S_IRUSR) ||
+ !(donor_filp->f_dentry->d_inode->i_mode &
+ S_IRUSR)) {
+ fput(donor_filp);
+ return -EACCES;
+ }
}
- err = mnt_want_write(filp->f_path.mnt);
- if (err)
- goto mext_out;
-
err = ext4_move_extents(filp, donor_filp, me.orig_start,
me.donor_start, me.len, &me.moved_len);
- mnt_drop_write(filp->f_path.mnt);
- if (me.moved_len > 0)
- file_remove_suid(donor_filp);
+ fput(donor_filp);
if (copy_to_user((struct move_extent *)arg, &me, sizeof(me)))
- err = -EFAULT;
-mext_out:
- fput(donor_filp);
+ return -EFAULT;
+
return err;
}
struct ext4_group_info *db;
int err, count = 0, count2 = 0;
struct ext4_free_data *entry;
+ ext4_fsblk_t discard_block;
struct list_head *l, *ltmp;
list_for_each_safe(l, ltmp, &txn->t_private_list) {
page_cache_release(e4b.bd_bitmap_page);
}
ext4_unlock_group(sb, entry->group);
- if (test_opt(sb, DISCARD)) {
- ext4_fsblk_t discard_block;
- struct ext4_super_block *es = EXT4_SB(sb)->s_es;
-
- discard_block = (ext4_fsblk_t)entry->group *
- EXT4_BLOCKS_PER_GROUP(sb)
- + entry->start_blk
- + le32_to_cpu(es->s_first_data_block);
- trace_ext4_discard_blocks(sb,
- (unsigned long long)discard_block,
- entry->count);
- sb_issue_discard(sb, discard_block, entry->count);
- }
+ discard_block = (ext4_fsblk_t) entry->group * EXT4_BLOCKS_PER_GROUP(sb)
+ + entry->start_blk
+ + le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
+ trace_ext4_discard_blocks(sb, (unsigned long long)discard_block,
+ entry->count);
+ sb_issue_discard(sb, discard_block, entry->count);
+
kmem_cache_free(ext4_free_ext_cachep, entry);
ext4_mb_release_desc(&e4b);
}
trace_ext4_mballoc_prealloc(ac);
}
-/*
- * Called on failure; free up any blocks from the inode PA for this
- * context. We don't need this for MB_GROUP_PA because we only change
- * pa_free in ext4_mb_release_context(), but on failure, we've already
- * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
- */
-static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
-{
- struct ext4_prealloc_space *pa = ac->ac_pa;
- int len;
-
- if (pa && pa->pa_type == MB_INODE_PA) {
- len = ac->ac_b_ex.fe_len;
- pa->pa_free += len;
- }
-
-}
-
/*
* use blocks preallocated to inode
*/
ac->ac_status = AC_STATUS_CONTINUE;
goto repeat;
} else if (*errp) {
- ext4_discard_allocated_blocks(ac);
ac->ac_b_ex.fe_len = 0;
ar->len = 0;
ext4_mb_show_ac(ac);
* So allocate a credit of 3. We may update
* quota (user and group).
*/
- needed = 3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
+ needed = 3 + 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
if (ext4_journal_extend(handle, needed) != 0)
retval = ext4_journal_restart(handle, needed);
handle = ext4_journal_start(inode,
EXT4_DATA_TRANS_BLOCKS(inode->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
- EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)
+ 2 * EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)
+ 1);
if (IS_ERR(handle)) {
retval = PTR_ERR(handle);
mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
struct ext4_extent **extent)
{
- struct ext4_extent_header *eh;
int ppos, leaf_ppos = path->p_depth;
ppos = leaf_ppos;
if (EXT_LAST_EXTENT(path[ppos].p_hdr) > path[ppos].p_ext) {
/* leaf block */
*extent = ++path[ppos].p_ext;
- path[ppos].p_block = ext_pblock(path[ppos].p_ext);
return 0;
}
ext_block_hdr(path[cur_ppos+1].p_bh);
}
- path[leaf_ppos].p_ext = *extent = NULL;
-
- eh = path[leaf_ppos].p_hdr;
- if (le16_to_cpu(eh->eh_entries) == 0)
- /* empty leaf is found */
- return -ENODATA;
-
/* leaf block */
path[leaf_ppos].p_ext = *extent =
EXT_FIRST_EXTENT(path[leaf_ppos].p_hdr);
- path[leaf_ppos].p_block =
- ext_pblock(path[leaf_ppos].p_ext);
return 0;
}
}
}
/**
- * double_down_write_data_sem - Acquire two inodes' write lock of i_data_sem
+ * mext_double_down_read - Acquire two inodes' read semaphore
+ *
+ * @orig_inode: original inode structure
+ * @donor_inode: donor inode structure
+ * Acquire read semaphore of the two inodes (orig and donor) by i_ino order.
+ */
+static void
+mext_double_down_read(struct inode *orig_inode, struct inode *donor_inode)
+{
+ struct inode *first = orig_inode, *second = donor_inode;
+
+ /*
+ * Use the inode number to provide the stable locking order instead
+ * of its address, because the C language doesn't guarantee you can
+ * compare pointers that don't come from the same array.
+ */
+ if (donor_inode->i_ino < orig_inode->i_ino) {
+ first = donor_inode;
+ second = orig_inode;
+ }
+
+ down_read(&EXT4_I(first)->i_data_sem);
+ down_read(&EXT4_I(second)->i_data_sem);
+}
+
+/**
+ * mext_double_down_write - Acquire two inodes' write semaphore
*
* @orig_inode: original inode structure
* @donor_inode: donor inode structure
- * Acquire write lock of i_data_sem of the two inodes (orig and donor) by
- * i_ino order.
+ * Acquire write semaphore of the two inodes (orig and donor) by i_ino order.
*/
static void
-double_down_write_data_sem(struct inode *orig_inode, struct inode *donor_inode)
+mext_double_down_write(struct inode *orig_inode, struct inode *donor_inode)
{
struct inode *first = orig_inode, *second = donor_inode;
}
down_write(&EXT4_I(first)->i_data_sem);
- down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
+ down_write(&EXT4_I(second)->i_data_sem);
}
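
The ordering trick in the helpers above generalises: when two locks of the same class must be taken, acquire them in a globally consistent order derived from a stable key (here the inode number), or two tasks locking the same pair from opposite ends can deadlock against each other. A user-space sketch:

#include <pthread.h>

struct obj {
	unsigned long id;		/* stable key, like i_ino */
	pthread_mutex_t lock;
};

/* Lock a pair in ascending id order so every thread agrees on the
 * order and ABBA deadlock cannot occur. */
static void lock_pair(struct obj *a, struct obj *b)
{
	struct obj *first = a, *second = b;

	if (b->id < a->id) {
		first = b;
		second = a;
	}
	pthread_mutex_lock(&first->lock);
	pthread_mutex_lock(&second->lock);
}

static void unlock_pair(struct obj *a, struct obj *b)
{
	pthread_mutex_unlock(&a->lock);
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct obj a = { 1, PTHREAD_MUTEX_INITIALIZER };
	struct obj b = { 2, PTHREAD_MUTEX_INITIALIZER };

	lock_pair(&a, &b);	/* same order as lock_pair(&b, &a) */
	unlock_pair(&a, &b);
	return 0;
}
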
/**
- * double_up_write_data_sem - Release two inodes' write lock of i_data_sem
+ * mext_double_up_read - Release two inodes' read semaphore
*
* @orig_inode: original inode structure whose lock is released first
* @donor_inode: donor inode structure whose lock is released second
- * Release write lock of i_data_sem of two inodes (orig and donor).
+ * Release read semaphore of two inodes (orig and donor).
*/
static void
-double_up_write_data_sem(struct inode *orig_inode, struct inode *donor_inode)
+mext_double_up_read(struct inode *orig_inode, struct inode *donor_inode)
+{
+ up_read(&EXT4_I(orig_inode)->i_data_sem);
+ up_read(&EXT4_I(donor_inode)->i_data_sem);
+}
+
+/**
+ * mext_double_up_write - Release two inodes' write semaphore
+ *
+ * @orig_inode: original inode structure whose lock is released first
+ * @donor_inode: donor inode structure whose lock is released second
+ * Release write semaphore of two inodes (orig and donor).
+ */
+static void
+mext_double_up_write(struct inode *orig_inode, struct inode *donor_inode)
{
up_write(&EXT4_I(orig_inode)->i_data_sem);
up_write(&EXT4_I(donor_inode)->i_data_sem);
* @donor_inode: donor inode
* @from: block offset of orig_inode
* @count: block count to be replaced
- * @err: pointer to save return value
*
* Replace original inode extents and donor inode extents page by page.
* We implement this replacement in the following three steps:
* 3. Change the block information of donor inode to point at the saved
* original inode blocks in the dummy extents.
*
- * Return replaced block count.
+ * Return 0 on success, or a negative error value on failure.
*/
static int
mext_replace_branches(handle_t *handle, struct inode *orig_inode,
struct inode *donor_inode, ext4_lblk_t from,
- ext4_lblk_t count, int *err)
+ ext4_lblk_t count)
{
struct ext4_ext_path *orig_path = NULL;
struct ext4_ext_path *donor_path = NULL;
struct ext4_extent *oext, *dext;
struct ext4_extent tmp_dext, tmp_oext;
ext4_lblk_t orig_off = from, donor_off = from;
+ int err = 0;
int depth;
int replaced_count = 0;
int dext_alen;
- /* Protect extent trees against block allocations via delalloc */
- double_down_write_data_sem(orig_inode, donor_inode);
+ mext_double_down_write(orig_inode, donor_inode);
/* Get the original extent for the block "orig_off" */
- *err = get_ext_path(orig_inode, orig_off, &orig_path);
- if (*err)
+ err = get_ext_path(orig_inode, orig_off, &orig_path);
+ if (err)
goto out;
/* Get the donor extent for the head */
- *err = get_ext_path(donor_inode, donor_off, &donor_path);
- if (*err)
+ err = get_ext_path(donor_inode, donor_off, &donor_path);
+ if (err)
goto out;
depth = ext_depth(orig_inode);
oext = orig_path[depth].p_ext;
dext = donor_path[depth].p_ext;
tmp_dext = *dext;
- *err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off,
+ err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off,
donor_off, count);
- if (*err)
+ if (err)
goto out;
/* Loop for the donor extents */
if (!dext) {
ext4_error(donor_inode->i_sb, __func__,
"The extent for donor must be found");
- *err = -EIO;
+ err = -EIO;
goto out;
} else if (donor_off != le32_to_cpu(tmp_dext.ee_block)) {
ext4_error(donor_inode->i_sb, __func__,
"extent(%u) should be equal",
donor_off,
le32_to_cpu(tmp_dext.ee_block));
- *err = -EIO;
+ err = -EIO;
goto out;
}
/* Set donor extent to orig extent */
- *err = mext_leaf_block(handle, orig_inode,
+ err = mext_leaf_block(handle, orig_inode,
orig_path, &tmp_dext, &orig_off);
- if (*err)
+ if (err < 0)
goto out;
/* Set orig extent to donor extent */
- *err = mext_leaf_block(handle, donor_inode,
+ err = mext_leaf_block(handle, donor_inode,
donor_path, &tmp_oext, &donor_off);
- if (*err)
+ if (err < 0)
goto out;
dext_alen = ext4_ext_get_actual_len(&tmp_dext);
if (orig_path)
ext4_ext_drop_refs(orig_path);
- *err = get_ext_path(orig_inode, orig_off, &orig_path);
- if (*err)
+ err = get_ext_path(orig_inode, orig_off, &orig_path);
+ if (err)
goto out;
depth = ext_depth(orig_inode);
oext = orig_path[depth].p_ext;
+ if (le32_to_cpu(oext->ee_block) +
+ ext4_ext_get_actual_len(oext) <= orig_off) {
+ err = 0;
+ goto out;
+ }
tmp_oext = *oext;
if (donor_path)
ext4_ext_drop_refs(donor_path);
- *err = get_ext_path(donor_inode, donor_off, &donor_path);
- if (*err)
+ err = get_ext_path(donor_inode, donor_off, &donor_path);
+ if (err)
goto out;
depth = ext_depth(donor_inode);
dext = donor_path[depth].p_ext;
+ if (le32_to_cpu(dext->ee_block) +
+ ext4_ext_get_actual_len(dext) <= donor_off) {
+ err = 0;
+ goto out;
+ }
tmp_dext = *dext;
- *err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off,
+ err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off,
donor_off, count - replaced_count);
- if (*err)
+ if (err)
goto out;
}
kfree(donor_path);
}
- ext4_ext_invalidate_cache(orig_inode);
- ext4_ext_invalidate_cache(donor_inode);
-
- double_up_write_data_sem(orig_inode, donor_inode);
-
- return replaced_count;
+ mext_double_up_write(orig_inode, donor_inode);
+ return err;
}
/**
* @data_offset_in_page: block index where data swapping starts
* @block_len_in_page: the number of blocks to be swapped
* @uninit: orig extent is uninitialized or not
- * @err: pointer to save return value
*
* Save the data in original inode blocks and replace original inode extents
* with donor inode extents by calling mext_replace_branches().
- * Finally, write out the saved data in new original inode blocks. Return
- * replaced block count.
+ * Finally, write out the saved data in new original inode blocks. Return 0
+ * on success, or a negative error value on failure.
*/
static int
move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
pgoff_t orig_page_offset, int data_offset_in_page,
- int block_len_in_page, int uninit, int *err)
+ int block_len_in_page, int uninit)
{
struct inode *orig_inode = o_filp->f_dentry->d_inode;
struct address_space *mapping = orig_inode->i_mapping;
long long offs = orig_page_offset << PAGE_CACHE_SHIFT;
unsigned long blocksize = orig_inode->i_sb->s_blocksize;
unsigned int w_flags = 0;
- unsigned int tmp_data_size, data_size, replaced_size;
+ unsigned int tmp_data_len, data_len;
void *fsdata;
- int i, jblocks;
- int err2 = 0;
- int replaced_count = 0;
+ int ret, i, jblocks;
int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
/*
jblocks = ext4_writepage_trans_blocks(orig_inode) * 2;
handle = ext4_journal_start(orig_inode, jblocks);
if (IS_ERR(handle)) {
- *err = PTR_ERR(handle);
- return 0;
+ ret = PTR_ERR(handle);
+ return ret;
}
if (segment_eq(get_fs(), KERNEL_DS))
* Just swap data blocks between orig and donor.
*/
if (uninit) {
- replaced_count = mext_replace_branches(handle, orig_inode,
- donor_inode, orig_blk_offset,
- block_len_in_page, err);
+ ret = mext_replace_branches(handle, orig_inode,
+ donor_inode, orig_blk_offset,
+ block_len_in_page);
+
+ /* Invalidate the extent caches so they no longer refer to the old data */
+ ext4_ext_invalidate_cache(orig_inode);
+ ext4_ext_invalidate_cache(donor_inode);
goto out2;
}
offs = (long long)orig_blk_offset << orig_inode->i_blkbits;
- /* Calculate data_size */
+ /* Calculate data_len */
if ((orig_blk_offset + block_len_in_page - 1) ==
((orig_inode->i_size - 1) >> orig_inode->i_blkbits)) {
/* Replace the last block */
- tmp_data_size = orig_inode->i_size & (blocksize - 1);
+ tmp_data_len = orig_inode->i_size & (blocksize - 1);
/*
- * If data_size equals zero, data_size is a multiple of the
+ * If data_len equals zero, data_len is a multiple of the
* blocksize, so set an appropriate value.
*/
- if (tmp_data_size == 0)
- tmp_data_size = blocksize;
+ if (tmp_data_len == 0)
+ tmp_data_len = blocksize;
- data_size = tmp_data_size +
+ data_len = tmp_data_len +
((block_len_in_page - 1) << orig_inode->i_blkbits);
- } else
- data_size = block_len_in_page << orig_inode->i_blkbits;
-
- replaced_size = data_size;
+ } else {
+ data_len = block_len_in_page << orig_inode->i_blkbits;
+ }
- *err = a_ops->write_begin(o_filp, mapping, offs, data_size, w_flags,
+ ret = a_ops->write_begin(o_filp, mapping, offs, data_len, w_flags,
&page, &fsdata);
- if (unlikely(*err < 0))
+ if (unlikely(ret < 0))
goto out;
if (!PageUptodate(page)) {
/* Release old bh and drop refs */
try_to_release_page(page, 0);
- replaced_count = mext_replace_branches(handle, orig_inode, donor_inode,
- orig_blk_offset, block_len_in_page,
- &err2);
- if (err2) {
- if (replaced_count) {
- block_len_in_page = replaced_count;
- replaced_size =
- block_len_in_page << orig_inode->i_blkbits;
- } else
- goto out;
- }
+ ret = mext_replace_branches(handle, orig_inode, donor_inode,
+ orig_blk_offset, block_len_in_page);
+ if (ret < 0)
+ goto out;
+
+ /* Invalidate the extent caches so they no longer refer to the old data */
+ ext4_ext_invalidate_cache(orig_inode);
+ ext4_ext_invalidate_cache(donor_inode);
if (!page_has_buffers(page))
create_empty_buffers(page, 1 << orig_inode->i_blkbits, 0);
bh = bh->b_this_page;
for (i = 0; i < block_len_in_page; i++) {
- *err = ext4_get_block(orig_inode,
+ ret = ext4_get_block(orig_inode,
(sector_t)(orig_blk_offset + i), bh, 0);
- if (*err < 0)
+ if (ret < 0)
goto out;
if (bh->b_this_page != NULL)
bh = bh->b_this_page;
}
- *err = a_ops->write_end(o_filp, mapping, offs, data_size, replaced_size,
+ ret = a_ops->write_end(o_filp, mapping, offs, data_len, data_len,
page, fsdata);
page = NULL;
out2:
ext4_journal_stop(handle);
- if (err2)
- *err = err2;
-
- return replaced_count;
+ return ret < 0 ? ret : 0;
}
/**
* @orig_start: logical start offset in block for orig
* @donor_start: logical start offset in block for donor
* @len: the number of blocks to be moved
+ * @moved_len: moved block length
*
* Check the arguments of ext4_move_extents() to see whether the files can be
* exchanged with each other.
*/
static int
mext_check_arguments(struct inode *orig_inode,
- struct inode *donor_inode, __u64 orig_start,
- __u64 donor_start, __u64 *len)
+ struct inode *donor_inode, __u64 orig_start,
+ __u64 donor_start, __u64 *len, __u64 moved_len)
{
ext4_lblk_t orig_blocks, donor_blocks;
unsigned int blkbits = orig_inode->i_blkbits;
return -EINVAL;
}
- if (donor_inode->i_mode & (S_ISUID|S_ISGID)) {
- ext4_debug("ext4 move extent: suid or sgid is set"
- " to donor file [ino:orig %lu, donor %lu]\n",
- orig_inode->i_ino, donor_inode->i_ino);
- return -EINVAL;
- }
-
/* Ext4 move extent does not support swapfile */
if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) {
ext4_debug("ext4 move extent: The argument files should "
return -EINVAL;
}
+ if (moved_len) {
+ ext4_debug("ext4 move extent: moved_len should be 0 "
+ "[ino:orig %lu, donor %lu]\n", orig_inode->i_ino,
+ donor_inode->i_ino);
+ return -EINVAL;
+ }
+
if ((orig_start > EXT_MAX_BLOCK) ||
(donor_start > EXT_MAX_BLOCK) ||
(*len > EXT_MAX_BLOCK) ||
return -EINVAL;
}
- /* Protect orig and donor inodes against a truncate */
+ /* protect orig and donor against a truncate */
ret1 = mext_inode_double_lock(orig_inode, donor_inode);
if (ret1 < 0)
return ret1;
- /* Protect extent tree against block allocations via delalloc */
- double_down_write_data_sem(orig_inode, donor_inode);
+ mext_double_down_read(orig_inode, donor_inode);
/* Check the filesystem environment whether move_extent can be done */
ret1 = mext_check_arguments(orig_inode, donor_inode, orig_start,
- donor_start, &len);
+ donor_start, &len, *moved_len);
+ mext_double_up_read(orig_inode, donor_inode);
if (ret1)
goto out;
seq_start = le32_to_cpu(ext_cur->ee_block);
rest_blocks = seq_blocks;
- /*
- * Up semaphore to avoid following problems:
- * a. transaction deadlock among ext4_journal_start,
- * ->write_begin via pagefault, and jbd2_journal_commit
- * b. racing with ->readpage, ->write_begin, and ext4_get_block
- * in move_extent_per_page
- */
- double_up_write_data_sem(orig_inode, donor_inode);
+ /* Discard preallocations of two inodes */
+ down_write(&EXT4_I(orig_inode)->i_data_sem);
+ ext4_discard_preallocations(orig_inode);
+ up_write(&EXT4_I(orig_inode)->i_data_sem);
+
+ down_write(&EXT4_I(donor_inode)->i_data_sem);
+ ext4_discard_preallocations(donor_inode);
+ up_write(&EXT4_I(donor_inode)->i_data_sem);
while (orig_page_offset <= seq_end_page) {
/* Swap original branches with new branches */
- block_len_in_page = move_extent_per_page(
- o_filp, donor_inode,
+ ret1 = move_extent_per_page(o_filp, donor_inode,
orig_page_offset,
data_offset_in_page,
- block_len_in_page, uninit,
- &ret1);
-
+ block_len_in_page, uninit);
+ if (ret1 < 0)
+ goto out;
+ orig_page_offset++;
/* Count how many blocks we have exchanged */
*moved_len += block_len_in_page;
- if (ret1 < 0)
- break;
if (*moved_len > len) {
ext4_error(orig_inode->i_sb, __func__,
"We replaced blocks too much! "
"sum of replaced: %llu requested: %llu",
*moved_len, len);
ret1 = -EIO;
- break;
+ goto out;
}
- orig_page_offset++;
data_offset_in_page = 0;
rest_blocks -= block_len_in_page;
if (rest_blocks > blocks_per_page)
block_len_in_page = rest_blocks;
}
- double_down_write_data_sem(orig_inode, donor_inode);
- if (ret1 < 0)
- break;
-
/* Decrease buffer counter */
if (holecheck_path)
ext4_ext_drop_refs(holecheck_path);
}
out:
- if (*moved_len) {
- ext4_discard_preallocations(orig_inode);
- ext4_discard_preallocations(donor_inode);
- }
-
if (orig_path) {
ext4_ext_drop_refs(orig_path);
kfree(orig_path);
ext4_ext_drop_refs(holecheck_path);
kfree(holecheck_path);
}
- double_up_write_data_sem(orig_inode, donor_inode);
+
ret2 = mext_inode_double_unlock(orig_inode, donor_inode);
if (ret1)
* add_dirent_to_buf will attempt to search the directory block for
* space. It will return -ENOSPC if no space is available, and -EIO
* and -EEXIST if directory entry already exists.
+ *
+ * NOTE! bh is NOT released in the case where ENOSPC is returned. In
+ * all other cases bh is released.
*/
static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
struct inode *inode, struct ext4_dir_entry_2 *de,
top = bh->b_data + blocksize - reclen;
while ((char *) de <= top) {
if (!ext4_check_dir_entry("ext4_add_entry", dir, de,
- bh, offset))
+ bh, offset)) {
+ brelse(bh);
return -EIO;
- if (ext4_match(namelen, name, de))
+ }
+ if (ext4_match(namelen, name, de)) {
+ brelse(bh);
return -EEXIST;
+ }
nlen = EXT4_DIR_REC_LEN(de->name_len);
rlen = ext4_rec_len_from_disk(de->rec_len, blocksize);
if ((de->inode? rlen - nlen: rlen) >= reclen)
err = ext4_journal_get_write_access(handle, bh);
if (err) {
ext4_std_error(dir->i_sb, err);
+ brelse(bh);
return err;
}
err = ext4_handle_dirty_metadata(handle, dir, bh);
if (err)
ext4_std_error(dir->i_sb, err);
+ brelse(bh);
return 0;
}
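
The NOTE above is an ownership contract: on -ENOSPC the buffer head stays with the caller (which goes on to split or extend the directory), while on any other return add_dirent_to_buf() has already dropped it. A generic compilable sketch of honouring such a contract, with invented names:

#include <stdlib.h>
#include <errno.h>

/* consume() frees buf on every return except -ENOSPC, where
 * ownership deliberately stays with the caller for a retry. */
static int consume(char *buf, int fits)
{
	if (!fits)
		return -ENOSPC;		/* caller keeps buf */
	/* ... use buf ... */
	free(buf);
	return 0;
}

int main(void)
{
	char *buf = malloc(32);
	int err;

	if (!buf)
		return 1;
	err = consume(buf, 0);
	if (err == -ENOSPC)
		free(buf);	/* still ours: retry or release */
	return 0;
}
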
if (!(de))
return retval;
- retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
- brelse(bh);
- return retval;
+ return add_dirent_to_buf(handle, dentry, inode, de, bh);
}
/*
if(!bh)
return retval;
retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
- if (retval != -ENOSPC) {
- brelse(bh);
+ if (retval != -ENOSPC)
return retval;
- }
if (blocks == 1 && !dx_fallback &&
EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX))
de = (struct ext4_dir_entry_2 *) bh->b_data;
de->inode = 0;
de->rec_len = ext4_rec_len_to_disk(blocksize, blocksize);
- retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
- brelse(bh);
- return retval;
+ return add_dirent_to_buf(handle, dentry, inode, de, bh);
}
/*
goto journal_error;
err = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
- if (err != -ENOSPC)
+ if (err != -ENOSPC) {
+ bh = NULL;
goto cleanup;
+ }
/* Block full, should compress but for now just split */
dxtrace(printk(KERN_DEBUG "using %u of %u node entries\n",
if (!de)
goto cleanup;
err = add_dirent_to_buf(handle, dentry, inode, de, bh);
+ bh = NULL;
goto cleanup;
journal_error:
retry:
handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
- EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
+ 2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
retry:
handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
- EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
+ 2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
retry:
handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
- EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
+ 2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
retry:
handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 5 +
- EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
+ 2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
goto exit_bh;
if (IS_ERR(gdb = bclean(handle, sb, block))) {
- err = PTR_ERR(gdb);
+ err = PTR_ERR(bh);
goto exit_bh;
}
ext4_handle_dirty_metadata(handle, NULL, gdb);
if (sb->s_dirt)
ext4_commit_super(sb, 1);
+ ext4_release_system_zone(sb);
+ ext4_mb_release(sb);
+ ext4_ext_release(sb);
+ ext4_xattr_put_super(sb);
if (sbi->s_journal) {
err = jbd2_journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
ext4_abort(sb, __func__,
"Couldn't clean up the journal");
}
-
- ext4_release_system_zone(sb);
- ext4_mb_release(sb);
- ext4_ext_release(sb);
- ext4_xattr_put_super(sb);
-
if (!(sb->s_flags & MS_RDONLY)) {
EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
es->s_state = cpu_to_le16(sbi->s_mount_state);
ei->i_allocated_meta_blocks = 0;
ei->i_delalloc_reserved_flag = 0;
spin_lock_init(&(ei->i_block_reservation_lock));
-#ifdef CONFIG_QUOTA
- ei->i_reserved_quota = 0;
-#endif
INIT_LIST_HEAD(&ei->i_aio_dio_complete_list);
ei->cur_aio_dio = NULL;
- ei->i_sync_tid = 0;
- ei->i_datasync_tid = 0;
return &ei->vfs_inode;
}
if (test_opt(sb, NO_AUTO_DA_ALLOC))
seq_puts(seq, ",noauto_da_alloc");
- if (test_opt(sb, DISCARD))
- seq_puts(seq, ",discard");
-
- if (test_opt(sb, NOLOAD))
- seq_puts(seq, ",norecovery");
-
ext4_show_quota_options(seq, sb);
return 0;
.reserve_space = dquot_reserve_space,
.claim_space = dquot_claim_space,
.release_rsv = dquot_release_reserved_space,
-#ifdef CONFIG_QUOTA
.get_reserved_space = ext4_get_reserved_space,
-#endif
.alloc_inode = dquot_alloc_inode,
.free_space = dquot_free_space,
.free_inode = dquot_free_inode,
Opt_usrquota, Opt_grpquota, Opt_i_version,
Opt_stripe, Opt_delalloc, Opt_nodelalloc,
Opt_block_validity, Opt_noblock_validity,
- Opt_inode_readahead_blks, Opt_journal_ioprio,
- Opt_discard, Opt_nodiscard,
+ Opt_inode_readahead_blks, Opt_journal_ioprio
};
static const match_table_t tokens = {
{Opt_acl, "acl"},
{Opt_noacl, "noacl"},
{Opt_noload, "noload"},
- {Opt_noload, "norecovery"},
{Opt_nobh, "nobh"},
{Opt_bh, "bh"},
{Opt_commit, "commit=%u"},
{Opt_auto_da_alloc, "auto_da_alloc=%u"},
{Opt_auto_da_alloc, "auto_da_alloc"},
{Opt_noauto_da_alloc, "noauto_da_alloc"},
- {Opt_discard, "discard"},
- {Opt_nodiscard, "nodiscard"},
{Opt_err, NULL},
};
else
set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC);
break;
- case Opt_discard:
- set_opt(sbi->s_mount_opt, DISCARD);
- break;
- case Opt_nodiscard:
- clear_opt(sbi->s_mount_opt, DISCARD);
- break;
default:
ext4_msg(sb, KERN_ERR,
"Unrecognized mount option \"%s\" "
size_t size;
int i;
- sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
- groups_per_flex = 1 << sbi->s_log_groups_per_flex;
-
- if (groups_per_flex < 2) {
+ if (!sbi->s_es->s_log_groups_per_flex) {
sbi->s_log_groups_per_flex = 0;
return 1;
}
+ sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
+ groups_per_flex = 1 << sbi->s_log_groups_per_flex;
+
/* We allocate both existing and potentially added groups */
flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
((le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) + 1) <<
buf->f_blocks = ext4_blocks_count(es) - sbi->s_overhead_last;
buf->f_bfree = percpu_counter_sum_positive(&sbi->s_freeblocks_counter) -
percpu_counter_sum_positive(&sbi->s_dirtyblocks_counter);
+ ext4_free_blocks_count_set(es, buf->f_bfree);
buf->f_bavail = buf->f_bfree - ext4_r_blocks_count(es);
if (buf->f_bfree < ext4_r_blocks_count(es))
buf->f_bavail = 0;
buf->f_files = le32_to_cpu(es->s_inodes_count);
buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
+ es->s_free_inodes_count = cpu_to_le32(buf->f_ffree);
buf->f_namelen = EXT4_NAME_LEN;
fsid = le64_to_cpup((void *)es->s_uuid) ^
le64_to_cpup((void *)es->s_uuid + sizeof(u64));
if (error)
goto cleanup;
- error = ext4_journal_get_write_access(handle, is.iloc.bh);
- if (error)
- goto cleanup;
-
if (EXT4_I(inode)->i_state & EXT4_STATE_NEW) {
struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc);
memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
if (flags & XATTR_CREATE)
goto cleanup;
}
+ error = ext4_journal_get_write_access(handle, is.iloc.bh);
+ if (error)
+ goto cleanup;
if (!value) {
if (!is.s.not_found)
error = ext4_xattr_ibody_set(handle, inode, &i, &is);
static struct kmem_cache *fasync_cache __read_mostly;
/*
- * Remove a fasync entry. If successfully removed, return
- * positive and clear the FASYNC flag. If no entry exists,
- * do nothing and return 0.
- *
- * NOTE! It is very important that the FASYNC flag always
- * match the state "is the filp on a fasync list".
- *
- * We always take the 'filp->f_lock', in since fasync_lock
- * needs to be irq-safe.
+ * fasync_helper() is used by almost all character device drivers
+ * to set up the fasync queue. It returns negative on error, 0 if it
+ * made no changes and positive if it added/deleted the entry.
*/
-static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
+int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
struct fasync_struct *fa, **fp;
+ struct fasync_struct *new = NULL;
int result = 0;
- spin_lock(&filp->f_lock);
- write_lock_irq(&fasync_lock);
- for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
- if (fa->fa_file != filp)
- continue;
- *fp = fa->fa_next;
- kmem_cache_free(fasync_cache, fa);
- filp->f_flags &= ~FASYNC;
- result = 1;
- break;
+ if (on) {
+ new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
}
- write_unlock_irq(&fasync_lock);
- spin_unlock(&filp->f_lock);
- return result;
-}
-
-/*
- * Add a fasync entry. Return negative on error, positive if
- * added, and zero if did nothing but change an existing one.
- *
- * NOTE! It is very important that the FASYNC flag always
- * match the state "is the filp on a fasync list".
- */
-static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
-{
- struct fasync_struct *new, *fa, **fp;
- int result = 0;
-
- new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
- if (!new)
- return -ENOMEM;
+ /*
+ * We need to take f_lock first since it's not an IRQ-safe
+ * lock.
+ */
spin_lock(&filp->f_lock);
write_lock_irq(&fasync_lock);
for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
- if (fa->fa_file != filp)
- continue;
- fa->fa_fd = fd;
- kmem_cache_free(fasync_cache, new);
- goto out;
+ if (fa->fa_file == filp) {
+ if (on) {
+ fa->fa_fd = fd;
+ kmem_cache_free(fasync_cache, new);
+ } else {
+ *fp = fa->fa_next;
+ kmem_cache_free(fasync_cache, fa);
+ result = 1;
+ }
+ goto out;
+ }
}
- new->magic = FASYNC_MAGIC;
- new->fa_file = filp;
- new->fa_fd = fd;
- new->fa_next = *fapp;
- *fapp = new;
- result = 1;
- filp->f_flags |= FASYNC;
-
+ if (on) {
+ new->magic = FASYNC_MAGIC;
+ new->fa_file = filp;
+ new->fa_fd = fd;
+ new->fa_next = *fapp;
+ *fapp = new;
+ result = 1;
+ }
out:
+ if (on)
+ filp->f_flags |= FASYNC;
+ else
+ filp->f_flags &= ~FASYNC;
write_unlock_irq(&fasync_lock);
spin_unlock(&filp->f_lock);
return result;
}
-/*
- * fasync_helper() is used by almost all character device drivers
- * to set up the fasync queue, and for regular files by the file
- * lease code. It returns negative on error, 0 if it did no changes
- * and positive if it added/deleted the entry.
- */
-int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
-{
- if (!on)
- return fasync_remove_entry(filp, fapp);
- return fasync_add_entry(fd, filp, fapp);
-}
-
EXPORT_SYMBOL(fasync_helper);
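For reference, the calling convention this consolidated helper serves is the
classic character-driver one; a minimal sketch (the "mydev" structure and its
async_queue member are illustrative, not part of this patch):

	/* hypothetical driver glue around fasync_helper() */
	struct mydev {
		struct fasync_struct *async_queue;	/* SIGIO subscriber list */
		/* ... other device state ... */
	};

	static int mydev_fasync(int fd, struct file *filp, int on)
	{
		struct mydev *dev = filp->private_data;

		/* <0 on error, 0 if nothing changed, >0 if an entry was
		 * added or removed */
		return fasync_helper(fd, filp, on, &dev->async_queue);
	}

	/* on data arrival the driver then signals subscribers with:
	 * kill_fasync(&dev->async_queue, SIGIO, POLL_IN); */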
void __kill_fasync(struct fasync_struct *fa, int sig, int band)
if (!page)
break;
- if (mapping_writably_mapped(mapping))
- flush_dcache_page(page);
-
pagefault_disable();
tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
pagefault_enable();
err = hfs_brec_find(&src_fd);
if (err)
goto out;
- if (src_fd.entrylength > sizeof(entry) || src_fd.entrylength < 0) {
- err = -EIO;
- goto out;
- }
hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset,
src_fd.entrylength);
filp->f_pos++;
/* fall through */
case 1:
- if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
- err = -EIO;
- goto out;
- }
-
hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength);
if (entry.type != HFS_CDR_THD) {
printk(KERN_ERR "hfs: bad catalog folder thread\n");
err = -EIO;
goto out;
}
-
- if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
- err = -EIO;
- goto out;
- }
-
hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength);
type = entry.type;
len = hfs_mac2asc(sb, strbuf, &fd.key->cat.CName);
/* try to get the root inode */
hfs_find_init(HFS_SB(sb)->cat_tree, &fd);
res = hfs_cat_find_brec(sb, HFS_ROOT_CNID, &fd);
- if (!res) {
- if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
- res = -EIO;
- goto bail;
- }
+ if (!res)
hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
- }
if (res) {
hfs_find_exit(&fd);
goto bail_no_root;
JBUFFER_TRACE(jh, "ph3: write metadata");
flags = jbd2_journal_write_metadata_buffer(commit_transaction,
jh, &new_jh, blocknr);
- if (flags < 0) {
- jbd2_journal_abort(journal, flags);
- continue;
- }
set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
wbuf[bufs++] = jh2bh(new_jh);
EXPORT_SYMBOL(jbd2_journal_ack_err);
EXPORT_SYMBOL(jbd2_journal_clear_err);
EXPORT_SYMBOL(jbd2_log_wait_commit);
-EXPORT_SYMBOL(jbd2_log_start_commit);
EXPORT_SYMBOL(jbd2_journal_start_commit);
EXPORT_SYMBOL(jbd2_journal_force_commit_nested);
EXPORT_SYMBOL(jbd2_journal_wipe);
jbd_unlock_bh_state(bh_in);
tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS);
- if (!tmp) {
- jbd2_journal_put_journal_head(new_jh);
- return -ENOMEM;
- }
jbd_lock_bh_state(bh_in);
if (jh_in->b_frozen_data) {
jbd2_free(tmp, bh_in->b_size);
if (jbd2_journal_recover(journal))
goto recovery_error;
- if (journal->j_failed_commit) {
- printk(KERN_ERR "JBD2: journal transaction %u on %s "
- "is corrupt.\n", journal->j_failed_commit,
- journal->j_devname);
- return -EIO;
- }
-
/* OK, we've finished with the dynamic journal bits:
* reinitialise the dynamic contents of the superblock in memory
* and reset them on disk. */
struct jffs2_raw_inode ri;
struct jffs2_node_frag *last_frag;
union jffs2_device_node dev;
- char *mdata = NULL;
- int mdatalen = 0;
+ char *mdata = NULL, mdatalen = 0;
uint32_t alloclen, ilen;
int ret;
/*
* Searching includes executable on directories, else just read.
*/
- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
if (capable(CAP_DAC_READ_SEARCH))
return 0;
data->res.fattr = &data->fattr;
data->res.eof = 0;
data->res.count = bytes;
- nfs_fattr_init(&data->fattr);
msg.rpc_argp = &data->args;
msg.rpc_resp = &data->res;
data->res.count = 0;
data->res.fattr = &data->fattr;
data->res.verf = &data->verf;
- nfs_fattr_init(&data->fattr);
NFS_PROTO(data->inode)->commit_setup(data, &msg);
data->res.fattr = &data->fattr;
data->res.count = bytes;
data->res.verf = &data->verf;
- nfs_fattr_init(&data->fattr);
task_setup_data.task = &data->task;
task_setup_data.callback_data = data;
{
dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
- if (gfp & __GFP_WAIT)
- nfs_wb_page(page->mapping->host, page);
/* If PagePrivate() is set, then the page is not freeable */
if (PagePrivate(page))
return 0;
*/
int nfs_fscache_release_page(struct page *page, gfp_t gfp)
{
- if (PageFsCache(page)) {
- struct nfs_inode *nfsi = NFS_I(page->mapping->host);
- struct fscache_cookie *cookie = nfsi->fscache;
+ struct nfs_inode *nfsi = NFS_I(page->mapping->host);
+ struct fscache_cookie *cookie = nfsi->fscache;
- BUG_ON(!cookie);
+ BUG_ON(!cookie);
+
+ if (PageFsCache(page)) {
dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
cookie, page, nfsi);
{ .status = MNT3ERR_INVAL, .errno = -EINVAL, },
{ .status = MNT3ERR_NAMETOOLONG, .errno = -ENAMETOOLONG, },
{ .status = MNT3ERR_NOTSUPP, .errno = -ENOTSUPP, },
- { .status = MNT3ERR_SERVERFAULT, .errno = -EREMOTEIO, },
+ { .status = MNT3ERR_SERVERFAULT, .errno = -ESERVERFAULT, },
};
struct mountres {
{ NFSERR_BAD_COOKIE, -EBADCOOKIE },
{ NFSERR_NOTSUPP, -ENOTSUPP },
{ NFSERR_TOOSMALL, -ETOOSMALL },
- { NFSERR_SERVERFAULT, -EREMOTEIO },
+ { NFSERR_SERVERFAULT, -ESERVERFAULT },
{ NFSERR_BADTYPE, -EBADTYPE },
{ NFSERR_JUKEBOX, -EJUKEBOX },
{ -1, -EIO }
NFS_O_RDWR_STATE, /* OPEN stateid has read/write state */
NFS_STATE_RECLAIM_REBOOT, /* OPEN stateid server rebooted */
NFS_STATE_RECLAIM_NOGRACE, /* OPEN stateid needs to recover state */
- NFS_STATE_POSIX_LOCKS, /* Posix locks are supported */
};
struct nfs4_state {
status = PTR_ERR(state);
if (IS_ERR(state))
goto err_opendata_put;
- if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0)
- set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
nfs4_opendata_put(opendata);
nfs4_put_state_owner(sp);
*res = state;
.rpc_release = nfs4_lock_release,
};
-static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
-{
- struct nfs_client *clp = server->nfs_client;
- struct nfs4_state *state = lsp->ls_state;
-
- switch (error) {
- case -NFS4ERR_ADMIN_REVOKED:
- case -NFS4ERR_BAD_STATEID:
- case -NFS4ERR_EXPIRED:
- if (new_lock_owner != 0 ||
- (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
- nfs4_state_mark_reclaim_nograce(clp, state);
- lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
- };
-}
-
static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int reclaim)
{
struct nfs4_lockdata *data;
ret = nfs4_wait_for_completion_rpc_task(task);
if (ret == 0) {
ret = data->rpc_status;
- if (ret)
- nfs4_handle_setlk_error(data->server, data->lsp,
- data->arg.new_lock_owner, ret);
} else
data->cancelled = 1;
rpc_put_task(task);
{
struct nfs_inode *nfsi = NFS_I(state->inode);
unsigned char fl_flags = request->fl_flags;
- int status = -ENOLCK;
+ int status;
- if ((fl_flags & FL_POSIX) &&
- !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
- goto out;
/* Is this a delegated open? */
status = nfs4_set_lock_state(state, request);
if (status != 0)
* If the server returns different values for sessionID, slotID or
* sequence number, the server is looney tunes.
*/
- status = -EREMOTEIO;
+ status = -ESERVERFAULT;
if (memcmp(id.data, res->sr_session->sess_id.data,
NFS4_MAX_SESSIONID_LEN)) {
{ NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
{ NFS4ERR_NOTSUPP, -ENOTSUPP },
{ NFS4ERR_TOOSMALL, -ETOOSMALL },
- { NFS4ERR_SERVERFAULT, -EREMOTEIO },
+ { NFS4ERR_SERVERFAULT, -ESERVERFAULT },
{ NFS4ERR_BADTYPE, -EBADTYPE },
{ NFS4ERR_LOCKED, -EAGAIN },
{ NFS4ERR_SYMLINK, -ELOOP },
}
if (stat <= 10000 || stat > 10100) {
/* The server is looney tunes. */
- return -EREMOTEIO;
+ return -ESERVERFAULT;
}
/* If we cannot translate the error, the recovery routines should
* handle it.
kref_put(&req->wb_kref, nfs_free_request);
}
-static int nfs_wait_bit_uninterruptible(void *word)
-{
- io_schedule();
- return 0;
-}
-
/**
* nfs_wait_on_request - Wait for a request to complete.
* @req: request to wait upon.
int
nfs_wait_on_request(struct nfs_page *req)
{
- return wait_on_bit(&req->wb_flags, PG_BUSY,
- nfs_wait_bit_uninterruptible,
- TASK_UNINTERRUPTIBLE);
+ int ret = 0;
+
+ if (!test_bit(PG_BUSY, &req->wb_flags))
+ goto out;
+ ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
+ nfs_wait_bit_killable, TASK_KILLABLE);
+out:
+ return ret;
}
/**
static int nfs_get_sb(struct file_system_type *, int, const char *, void *, struct vfsmount *);
static int nfs_xdev_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
-static void nfs_put_super(struct super_block *);
static void nfs_kill_super(struct super_block *);
static int nfs_remount(struct super_block *sb, int *flags, char *raw_data);
.alloc_inode = nfs_alloc_inode,
.destroy_inode = nfs_destroy_inode,
.write_inode = nfs_write_inode,
- .put_super = nfs_put_super,
.statfs = nfs_statfs,
.clear_inode = nfs_clear_inode,
.umount_begin = nfs_umount_begin,
.alloc_inode = nfs_alloc_inode,
.destroy_inode = nfs_destroy_inode,
.write_inode = nfs_write_inode,
- .put_super = nfs_put_super,
.statfs = nfs_statfs,
.clear_inode = nfs4_clear_inode,
.umount_begin = nfs_umount_begin,
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (data) {
+ data->rsize = NFS_MAX_FILE_IO_SIZE;
+ data->wsize = NFS_MAX_FILE_IO_SIZE;
data->acregmin = NFS_DEF_ACREGMIN;
data->acregmax = NFS_DEF_ACREGMAX;
data->acdirmin = NFS_DEF_ACDIRMIN;
goto out;
}
-/*
- * Ensure that we unregister the bdi before kill_anon_super
- * releases the device name
- */
-static void nfs_put_super(struct super_block *s)
-{
- struct nfs_server *server = NFS_SB(s);
-
- bdi_unregister(&server->backing_dev_info);
-}
-
/*
* Destroy an NFS2/3 superblock
*/
struct nfs_server *server = NFS_SB(s);
kill_anon_super(s);
+ bdi_unregister(&server->backing_dev_info);
nfs_fscache_release_super_cookie(s);
nfs_free_server(server);
}
break;
}
ret = nfs_wait_on_request(req);
- nfs_release_request(req);
if (ret < 0)
goto out;
}
if (ret)
goto out_unlock;
page_cache_get(newpage);
- spin_lock(&mapping->host->i_lock);
req->wb_page = newpage;
SetPagePrivate(newpage);
- set_page_private(newpage, (unsigned long)req);
+ set_page_private(newpage, page_private(page));
ClearPagePrivate(page);
set_page_private(page, 0);
- spin_unlock(&mapping->host->i_lock);
page_cache_release(page);
out_unlock:
nfs_clear_page_tag_locked(req);
+ nfs_release_request(req);
out:
return ret;
}
sort_pacl_range(pacl, 1, i-1);
BUG_ON(pacl->a_entries[i].e_tag != ACL_GROUP_OBJ);
- j = ++i;
+ j = i++;
while (pacl->a_entries[j].e_tag == ACL_GROUP)
j++;
sort_pacl_range(pacl, i, j-1);
int (*fsync) (struct file *, struct dentry *, int);
int err;
- err = filemap_write_and_wait(inode->i_mapping);
+ err = filemap_fdatawrite(inode->i_mapping);
if (err == 0 && fop && (fsync = fop->fsync))
err = fsync(filp, dp, 0);
+ if (err == 0)
+ err = filemap_fdatawait(inode->i_mapping);
+
return err;
}
if (warned)
return 0;
- warned = true;
+ warned = false;
entry = p;
ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
spin_lock(&group->inotify_data.idr_lock);
ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
- group->inotify_data.last_wd+1,
+ group->inotify_data.last_wd,
&tmp_ientry->wd);
spin_unlock(&group->inotify_data.idr_lock);
if (ret) {
spin_lock_init(&group->inotify_data.idr_lock);
idr_init(&group->inotify_data.idr);
- group->inotify_data.last_wd = 0;
+ group->inotify_data.last_wd = 1;
group->inotify_data.user = user;
group->inotify_data.fa = NULL;
/************************************************************
* EFI GUID Partition Table handling
- *
- * http://www.uefi.org/specs/
- * http://www.intel.com/technology/efi/
- *
+ * Per Intel EFI Specification v1.02
+ * http://developer.intel.com/technology/efi/efi.htm
* efi.[ch] by Matt Domsch <Matt_Domsch@dell.com>
* Copyright 2000,2001,2002,2004 Dell Inc.
*
*
************************************************************/
#include <linux/crc32.h>
-#include <linux/math64.h>
#include "check.h"
#include "efi.h"
{
if (!bdev || !bdev->bd_inode)
return 0;
- return div_u64(bdev->bd_inode->i_size,
- bdev_logical_block_size(bdev)) - 1ULL;
+ return (bdev->bd_inode->i_size >> 9) - 1ULL;
}
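As a worked example of what the shift hard-codes (illustrative numbers only):
for a device with 4096-byte logical blocks and i_size of 4 GiB,
(i_size >> 9) - 1 = 8388607 counts 512-byte sectors, while dividing by
bdev_logical_block_size() gives 1048575 logical blocks, a factor-of-eight
difference on such media.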
static inline int
read_lba(struct block_device *bdev, u64 lba, u8 * buffer, size_t count)
{
size_t totalreadcount = 0;
- sector_t n = lba * (bdev_logical_block_size(bdev) / 512);
if (!bdev || !buffer || lba > last_lba(bdev))
return 0;
while (count) {
int copied = 512;
Sector sect;
- unsigned char *data = read_dev_sector(bdev, n++, &sect);
+ unsigned char *data = read_dev_sector(bdev, lba++, &sect);
if (!data)
break;
if (copied > count)
alloc_read_gpt_header(struct block_device *bdev, u64 lba)
{
gpt_header *gpt;
- unsigned ssz = bdev_logical_block_size(bdev);
-
if (!bdev)
return NULL;
- gpt = kzalloc(ssz, GFP_KERNEL);
+ gpt = kzalloc(sizeof (gpt_header), GFP_KERNEL);
if (!gpt)
return NULL;
- if (read_lba(bdev, lba, (u8 *) gpt, ssz) < ssz) {
+ if (read_lba(bdev, lba, (u8 *) gpt,
+ sizeof (gpt_header)) < sizeof (gpt_header)) {
kfree(gpt);
gpt=NULL;
return NULL;
gpt_header *gpt = NULL;
gpt_entry *ptes = NULL;
u32 i;
- unsigned ssz = bdev_logical_block_size(bdev) / 512;
if (!find_valid_gpt(bdev, &gpt, &ptes) || !gpt || !ptes) {
kfree(gpt);
pr_debug("GUID Partition Table is valid! Yea!\n");
for (i = 0; i < le32_to_cpu(gpt->num_partition_entries) && i < state->limit-1; i++) {
- u64 start = le64_to_cpu(ptes[i].starting_lba);
- u64 size = le64_to_cpu(ptes[i].ending_lba) -
- le64_to_cpu(ptes[i].starting_lba) + 1ULL;
-
if (!is_pte_valid(&ptes[i], last_lba(bdev)))
continue;
- put_partition(state, i+1, start * ssz, size * ssz);
+ put_partition(state, i+1, le64_to_cpu(ptes[i].starting_lba),
+ (le64_to_cpu(ptes[i].ending_lba) -
+ le64_to_cpu(ptes[i].starting_lba) +
+ 1ULL));
/* If this is a RAID volume, tell md */
if (!efi_guidcmp(ptes[i].partition_type_guid,
#define EFI_PMBR_OSTYPE_EFI 0xEF
#define EFI_PMBR_OSTYPE_EFI_GPT 0xEE
+#define GPT_BLOCK_SIZE 512
#define GPT_HEADER_SIGNATURE 0x5452415020494645ULL
#define GPT_HEADER_REVISION_V1 0x00010000
#define GPT_PRIMARY_PARTITION_TABLE_LBA 1
__le32 num_partition_entries;
__le32 sizeof_partition_entry;
__le32 partition_entry_array_crc32;
-
- /* The rest of the logical block is reserved by UEFI and must be zero.
- * EFI standard handles this by:
- *
- * uint8_t reserved2[ BlockSize - 92 ];
- */
+ u8 reserved2[GPT_BLOCK_SIZE - 92];
} __attribute__ ((packed)) gpt_header;
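With the fixed-size reserved field, sizeof(gpt_header) comes out to exactly
GPT_BLOCK_SIZE (512) bytes: 92 bytes of defined fields plus the 420-byte
reserved2 tail.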
typedef struct _gpt_entry_attributes {
}
EXPORT_SYMBOL(vfs_dq_drop);
-/*
- * inode_reserved_space is managed internally by quota, and protected by
- * i_lock similar to i_blocks+i_bytes.
- */
-static qsize_t *inode_reserved_space(struct inode * inode)
-{
- /* Filesystem must explicitly define its own method in order to use
- * quota reservation interface */
- BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
- return inode->i_sb->dq_op->get_reserved_space(inode);
-}
-
-static void inode_add_rsv_space(struct inode *inode, qsize_t number)
-{
- spin_lock(&inode->i_lock);
- *inode_reserved_space(inode) += number;
- spin_unlock(&inode->i_lock);
-}
-
-
-static void inode_claim_rsv_space(struct inode *inode, qsize_t number)
-{
- spin_lock(&inode->i_lock);
- *inode_reserved_space(inode) -= number;
- __inode_add_bytes(inode, number);
- spin_unlock(&inode->i_lock);
-}
-
-static void inode_sub_rsv_space(struct inode *inode, qsize_t number)
-{
- spin_lock(&inode->i_lock);
- *inode_reserved_space(inode) -= number;
- spin_unlock(&inode->i_lock);
-}
-
-static qsize_t inode_get_rsv_space(struct inode *inode)
-{
- qsize_t ret;
-
- if (!inode->i_sb->dq_op->get_reserved_space)
- return 0;
- spin_lock(&inode->i_lock);
- ret = *inode_reserved_space(inode);
- spin_unlock(&inode->i_lock);
- return ret;
-}
-
-static void inode_incr_space(struct inode *inode, qsize_t number,
- int reserve)
-{
- if (reserve)
- inode_add_rsv_space(inode, number);
- else
- inode_add_bytes(inode, number);
-}
-
-static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
-{
- if (reserve)
- inode_sub_rsv_space(inode, number);
- else
- inode_sub_bytes(inode, number);
-}
-
/*
* Following four functions update i_blocks+i_bytes fields and
* quota information (together with appropriate checks)
int cnt, ret = QUOTA_OK;
char warntype[MAXQUOTAS];
- /*
- * First test before acquiring mutex - solves deadlocks when we
- * re-enter the quota code and are already holding the mutex
- */
- if (IS_NOQUOTA(inode)) {
- inode_incr_space(inode, number, reserve);
- goto out;
- }
-
- down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
- if (IS_NOQUOTA(inode)) {
- inode_incr_space(inode, number, reserve);
- goto out_unlock;
- }
-
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warntype[cnt] = QUOTA_NL_NOWARN;
if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt)
== NO_QUOTA) {
ret = NO_QUOTA;
- spin_unlock(&dq_data_lock);
- goto out_flush_warn;
+ goto out_unlock;
}
}
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
else
dquot_incr_space(inode->i_dquot[cnt], number);
}
- inode_incr_space(inode, number, reserve);
+ if (!reserve)
+ inode_add_bytes(inode, number);
+out_unlock:
spin_unlock(&dq_data_lock);
+ flush_warnings(inode->i_dquot, warntype);
+ return ret;
+}
+
+int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
+{
+ int cnt, ret = QUOTA_OK;
+
+ /*
+ * First test before acquiring mutex - solves deadlocks when we
+ * re-enter the quota code and are already holding the mutex
+ */
+ if (IS_NOQUOTA(inode)) {
+ inode_add_bytes(inode, number);
+ goto out;
+ }
+
+ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ if (IS_NOQUOTA(inode)) {
+ inode_add_bytes(inode, number);
+ goto out_unlock;
+ }
+
+ ret = __dquot_alloc_space(inode, number, warn, 0);
+ if (ret == NO_QUOTA)
+ goto out_unlock;
- if (reserve)
- goto out_flush_warn;
/* Dirtify all the dquots - this can block when journalling */
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
if (inode->i_dquot[cnt])
mark_dquot_dirty(inode->i_dquot[cnt]);
-out_flush_warn:
- flush_warnings(inode->i_dquot, warntype);
out_unlock:
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
out:
return ret;
}
-
-int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
-{
- return __dquot_alloc_space(inode, number, warn, 0);
-}
EXPORT_SYMBOL(dquot_alloc_space);
int dquot_reserve_space(struct inode *inode, qsize_t number, int warn)
{
- return __dquot_alloc_space(inode, number, warn, 1);
+ int ret = QUOTA_OK;
+
+ if (IS_NOQUOTA(inode))
+ goto out;
+
+ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ if (IS_NOQUOTA(inode))
+ goto out_unlock;
+
+ ret = __dquot_alloc_space(inode, number, warn, 1);
+out_unlock:
+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+out:
+ return ret;
}
EXPORT_SYMBOL(dquot_reserve_space);
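The reserve/claim/release trio exists for delayed allocation; the expected
call sequence from a filesystem's perspective is roughly the following
(schematic only; the names nr, allocated and blkbits are illustrative):

	/* write time: no blocks allocated yet, reserve against the worst case */
	dquot_reserve_space(inode, nr << blkbits, 1);

	/* writeback time: blocks now exist, convert reservation into usage */
	dquot_claim_space(inode, allocated << blkbits);

	/* anything reserved but never actually allocated goes back */
	dquot_release_reserved_space(inode, (nr - allocated) << blkbits);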
int ret = QUOTA_OK;
if (IS_NOQUOTA(inode)) {
- inode_claim_rsv_space(inode, number);
+ inode_add_bytes(inode, number);
goto out;
}
down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
if (IS_NOQUOTA(inode)) {
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
- inode_claim_rsv_space(inode, number);
+ inode_add_bytes(inode, number);
goto out;
}
number);
}
/* Update inode bytes */
- inode_claim_rsv_space(inode, number);
+ inode_add_bytes(inode, number);
spin_unlock(&dq_data_lock);
/* Dirtify all the dquots - this can block when journalling */
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
}
EXPORT_SYMBOL(dquot_claim_space);
+/*
+ * Release reserved quota space
+ */
+void dquot_release_reserved_space(struct inode *inode, qsize_t number)
+{
+ int cnt;
+
+ if (IS_NOQUOTA(inode))
+ goto out;
+
+ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ if (IS_NOQUOTA(inode))
+ goto out_unlock;
+
+ spin_lock(&dq_data_lock);
+ /* Release reserved dquots */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (inode->i_dquot[cnt])
+ dquot_free_reserved_space(inode->i_dquot[cnt], number);
+ }
+ spin_unlock(&dq_data_lock);
+
+out_unlock:
+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+out:
+ return;
+}
+EXPORT_SYMBOL(dquot_release_reserved_space);
+
/*
* This operation can block, but only after everything is updated
*/
-int __dquot_free_space(struct inode *inode, qsize_t number, int reserve)
+int dquot_free_space(struct inode *inode, qsize_t number)
{
unsigned int cnt;
char warntype[MAXQUOTAS];
* re-enter the quota code and are already holding the mutex */
if (IS_NOQUOTA(inode)) {
out_sub:
- inode_decr_space(inode, number, reserve);
+ inode_sub_bytes(inode, number);
return QUOTA_OK;
}
if (!inode->i_dquot[cnt])
continue;
warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
- if (reserve)
- dquot_free_reserved_space(inode->i_dquot[cnt], number);
- else
- dquot_decr_space(inode->i_dquot[cnt], number);
+ dquot_decr_space(inode->i_dquot[cnt], number);
}
- inode_decr_space(inode, number, reserve);
+ inode_sub_bytes(inode, number);
spin_unlock(&dq_data_lock);
-
- if (reserve)
- goto out_unlock;
/* Dirtify all the dquots - this can block when journalling */
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
if (inode->i_dquot[cnt])
mark_dquot_dirty(inode->i_dquot[cnt]);
-out_unlock:
flush_warnings(inode->i_dquot, warntype);
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
return QUOTA_OK;
}
-
-int dquot_free_space(struct inode *inode, qsize_t number)
-{
- return __dquot_free_space(inode, number, 0);
-}
EXPORT_SYMBOL(dquot_free_space);
-/*
- * Release reserved quota space
- */
-void dquot_release_reserved_space(struct inode *inode, qsize_t number)
-{
- __dquot_free_space(inode, number, 1);
-
-}
-EXPORT_SYMBOL(dquot_release_reserved_space);
-
/*
* This operation can block, but only after everything is updated
*/
}
EXPORT_SYMBOL(dquot_free_inode);
+/*
+ * callback function, get reserved quota space from underlying fs
+ */
+qsize_t dquot_get_reserved_space(struct inode *inode)
+{
+ qsize_t reserved_space = 0;
+
+ if (sb_any_quota_active(inode->i_sb) &&
+ inode->i_sb->dq_op->get_reserved_space)
+ reserved_space = inode->i_sb->dq_op->get_reserved_space(inode);
+ return reserved_space;
+}
+
/*
* Transfer the number of inode and blocks from one diskquota to an other.
*
}
spin_lock(&dq_data_lock);
cur_space = inode_get_bytes(inode);
- rsv_space = inode_get_rsv_space(inode);
+ rsv_space = dquot_get_reserved_space(inode);
space = cur_space + rsv_space;
/* Build the transfer_from list and check the limits */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
return reiserfs_write_full_page(page, wbc);
}
-static void reiserfs_truncate_failed_write(struct inode *inode)
-{
- truncate_inode_pages(inode->i_mapping, inode->i_size);
- reiserfs_truncate_file(inode, 0);
-}
-
static int reiserfs_write_begin(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
if (ret) {
unlock_page(page);
page_cache_release(page);
- /* Truncate allocated blocks */
- reiserfs_truncate_failed_write(inode);
}
return ret;
}
** transaction tracking stuff when the size changes. So, we have
** to do the i_size updates here.
*/
- if (pos + copied > inode->i_size) {
+ pos += copied;
+ if (pos > inode->i_size) {
struct reiserfs_transaction_handle myth;
reiserfs_write_lock(inode->i_sb);
/* If the file has grown beyond the border where it
goto journal_error;
}
reiserfs_update_inode_transaction(inode);
- inode->i_size = pos + copied;
+ inode->i_size = pos;
/*
* this will just nest into our transaction. It's important
* to use mark_inode_dirty so the inode gets pushed around on the
out:
unlock_page(page);
page_cache_release(page);
-
- if (pos + len > inode->i_size)
- reiserfs_truncate_failed_write(inode);
-
return ret == 0 ? copied : ret;
journal_error:
error_rsb_inval:
ret = -EINVAL;
error_rsb:
- kfree(rsb);
return ret;
}
}
#endif /* __ARCH_WANT_STAT64 */
-/* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
-void __inode_add_bytes(struct inode *inode, loff_t bytes)
+void inode_add_bytes(struct inode *inode, loff_t bytes)
{
+ spin_lock(&inode->i_lock);
inode->i_blocks += bytes >> 9;
bytes &= 511;
inode->i_bytes += bytes;
inode->i_blocks++;
inode->i_bytes -= 512;
}
-}
-
-void inode_add_bytes(struct inode *inode, loff_t bytes)
-{
- spin_lock(&inode->i_lock);
- __inode_add_bytes(inode, bytes);
spin_unlock(&inode->i_lock);
}
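The split keeps i_blocks in 512-byte units and i_bytes as the sub-sector
remainder; for example, adding 1300 bytes does i_blocks += 2 (1300 >> 9)
and i_bytes += 276 (1300 & 511), and the carry branch folds i_bytes back
into i_blocks whenever it reaches 512.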
return error;
}
s->s_flags |= MS_ACTIVE;
- } else {
- do_remount_sb(s, flags, data, 0);
}
+ do_remount_sb(s, flags, data, 0);
simple_set_mnt(mnt, s);
return 0;
}
if (!sd_attrs)
return -ENOMEM;
sd->s_iattr = sd_attrs;
- }
- /* attributes were changed at least once in past */
- iattrs = &sd_attrs->ia_iattr;
-
- if (ia_valid & ATTR_UID)
- iattrs->ia_uid = iattr->ia_uid;
- if (ia_valid & ATTR_GID)
- iattrs->ia_gid = iattr->ia_gid;
- if (ia_valid & ATTR_ATIME)
- iattrs->ia_atime = timespec_trunc(iattr->ia_atime,
- inode->i_sb->s_time_gran);
- if (ia_valid & ATTR_MTIME)
- iattrs->ia_mtime = timespec_trunc(iattr->ia_mtime,
- inode->i_sb->s_time_gran);
- if (ia_valid & ATTR_CTIME)
- iattrs->ia_ctime = timespec_trunc(iattr->ia_ctime,
- inode->i_sb->s_time_gran);
- if (ia_valid & ATTR_MODE) {
- umode_t mode = iattr->ia_mode;
-
- if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
- mode &= ~S_ISGID;
- iattrs->ia_mode = sd->s_mode = mode;
+ } else {
+ /* attributes were changed at least once in past */
+ iattrs = &sd_attrs->ia_iattr;
+
+ if (ia_valid & ATTR_UID)
+ iattrs->ia_uid = iattr->ia_uid;
+ if (ia_valid & ATTR_GID)
+ iattrs->ia_gid = iattr->ia_gid;
+ if (ia_valid & ATTR_ATIME)
+ iattrs->ia_atime = timespec_trunc(iattr->ia_atime,
+ inode->i_sb->s_time_gran);
+ if (ia_valid & ATTR_MTIME)
+ iattrs->ia_mtime = timespec_trunc(iattr->ia_mtime,
+ inode->i_sb->s_time_gran);
+ if (ia_valid & ATTR_CTIME)
+ iattrs->ia_ctime = timespec_trunc(iattr->ia_ctime,
+ inode->i_sb->s_time_gran);
+ if (ia_valid & ATTR_MODE) {
+ umode_t mode = iattr->ia_mode;
+
+ if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
+ mode &= ~S_ISGID;
+ iattrs->ia_mode = sd->s_mode = mode;
+ }
}
return error;
}
return 0;
}
-static void udf_find_vat_block(struct super_block *sb, int p_index,
- int type1_index, sector_t start_block)
-{
- struct udf_sb_info *sbi = UDF_SB(sb);
- struct udf_part_map *map = &sbi->s_partmaps[p_index];
- sector_t vat_block;
- struct kernel_lb_addr ino;
-
- /*
- * VAT file entry is in the last recorded block. Some broken disks have
- * it a few blocks before so try a bit harder...
- */
- ino.partitionReferenceNum = type1_index;
- for (vat_block = start_block;
- vat_block >= map->s_partition_root &&
- vat_block >= start_block - 3 &&
- !sbi->s_vat_inode; vat_block--) {
- ino.logicalBlockNum = vat_block - map->s_partition_root;
- sbi->s_vat_inode = udf_iget(sb, &ino);
- }
-}
-
static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
{
struct udf_sb_info *sbi = UDF_SB(sb);
struct udf_part_map *map = &sbi->s_partmaps[p_index];
+ struct kernel_lb_addr ino;
struct buffer_head *bh = NULL;
struct udf_inode_info *vati;
uint32_t pos;
struct virtualAllocationTable20 *vat20;
sector_t blocks = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
- udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block);
+ /* VAT file entry is in the last recorded block */
+ ino.partitionReferenceNum = type1_index;
+ ino.logicalBlockNum = sbi->s_last_block - map->s_partition_root;
+ sbi->s_vat_inode = udf_iget(sb, &ino);
if (!sbi->s_vat_inode &&
sbi->s_last_block != blocks - 1) {
printk(KERN_NOTICE "UDF-fs: Failed to read VAT inode from the"
"block of the device (%lu).\n",
(unsigned long)sbi->s_last_block,
(unsigned long)blocks - 1);
- udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
+ ino.partitionReferenceNum = type1_index;
+ ino.logicalBlockNum = blocks - 1 - map->s_partition_root;
+ sbi->s_vat_inode = udf_iget(sb, &ino);
}
if (!sbi->s_vat_inode)
return 1;
#include <linux/hardirq.h>
#define ACPI_PREEMPTION_POINT() \
do { \
- if (!in_atomic_preempt_off() && !irqs_disabled()) \
+ if (!in_atomic_preempt_off()) \
cond_resched(); \
} while (0)
extern void drm_handle_vblank(struct drm_device *dev, int crtc);
extern int drm_vblank_get(struct drm_device *dev, int crtc);
extern void drm_vblank_put(struct drm_device *dev, int crtc);
-extern void drm_vblank_off(struct drm_device *dev, int crtc);
extern void drm_vblank_cleanup(struct drm_device *dev);
/* Modesetting support */
extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
struct drm_ati_pcigart_info * gart_info);
extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
- size_t align);
+ size_t align, dma_addr_t maxaddr);
extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
remove_wait_queue(&(queue), &entry); \
} while (0)
-#define DRM_WAKEUP( queue ) wake_up( queue )
+#define DRM_WAKEUP( queue ) wake_up_interruptible( queue )
#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/kobject.h>
-#include <linux/mm.h>
/**
* struct ttm_mem_shrink - callback to shrink TTM memory usage.
void __init acpi_s4_no_nvs(void);
#endif /* CONFIG_PM_SLEEP */
-struct acpi_osc_context {
- char *uuid_str; /* uuid string */
- int rev;
- struct acpi_buffer cap; /* arg2/arg3 */
- struct acpi_buffer ret; /* free by caller if success */
-};
-
#define OSC_QUERY_TYPE 0
#define OSC_SUPPORT_TYPE 1
#define OSC_CONTROL_TYPE 2
#define OSC_INVALID_REVISION_ERROR 8
#define OSC_CAPABILITIES_MASK_ERROR 16
-acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
-
-/* platform-wide _OSC bits */
-#define OSC_SB_PAD_SUPPORT 1
-#define OSC_SB_PPC_OST_SUPPORT 2
-#define OSC_SB_PR3_SUPPORT 4
-#define OSC_SB_CPUHP_OST_SUPPORT 8
-#define OSC_SB_APEI_SUPPORT 16
-
/* _OSC DW1 Definition (OS Support Fields) */
#define OSC_EXT_PCI_CONFIG_SUPPORT 1
#define OSC_ACTIVE_STATE_PWR_SUPPORT 2
extern int __must_check remove_arg_zero(struct linux_binprm *);
extern int search_binary_handler(struct linux_binprm *,struct pt_regs *);
extern int flush_old_exec(struct linux_binprm * bprm);
-extern void setup_new_exec(struct linux_binprm * bprm);
extern int suid_dumpable;
#define SUID_DUMP_DISABLE 0 /* No setuid dumping */
extern void blk_set_default_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
sector_t offset);
-extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
- sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
return q->limits.alignment_offset;
}
-static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t offset)
-{
- unsigned int granularity = max(lim->physical_block_size, lim->io_min);
-
- offset &= granularity - 1;
- return (granularity + lim->alignment_offset - offset) & (granularity - 1);
-}
-
static inline int queue_sector_alignment_offset(struct request_queue *q,
sector_t sector)
{
- return queue_limit_alignment_offset(&q->limits, sector << 9);
+ return ((sector << 9) - q->limits.alignment_offset)
+ & (q->limits.io_min - 1);
}
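With illustrative numbers: io_min = 4096 and alignment_offset = 0, a
partition starting at sector 63 yields ((63 << 9) - 0) & 4095 = 3584, i.e.
the partition begins 3584 bytes past the nearest 4 KiB boundary.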
static inline int bdev_alignment_offset(struct block_device *bdev)
* subtraction of non 64 bit counters
* @mult: cycle to nanosecond multiplier
* @shift: cycle to nanosecond divisor (power of two)
- * @max_idle_ns: max idle time permitted by the clocksource (nsecs)
* @flags: flags describing special properties
* @vread: vsyscall based read
* @resume: resume function for the clocksource, if necessary
cycle_t mask;
u32 mult;
u32 shift;
- u64 max_idle_ns;
unsigned long flags;
cycle_t (*vread)(void);
void (*resume)(void);
#include <linux/types.h>
+#define CN_IDX_CONNECTOR 0xffffffff
+#define CN_VAL_CONNECTOR 0xffffffff
+
/*
* Process Events connector unique ids -- used for message routing
*/
__u8 data[0];
};
+/*
+ * Notify structure - requests notification about
+ * registering/unregistering idx/val in range [first, first+range].
+ */
+struct cn_notify_req {
+ __u32 first;
+ __u32 range;
+};
+
+/*
+ * Main notification control message
+ * *_notify_num - number of appropriate cn_notify_req structures after
+ * this struct.
+ * group - notification receiver's idx.
+ * len - total length of the attached data.
+ */
+struct cn_ctl_msg {
+ __u32 idx_notify_num;
+ __u32 val_notify_num;
+ __u32 group;
+ __u32 len;
+ __u8 data[0];
+};
+
#ifdef __KERNEL__
#include <asm/atomic.h>
u32 seq, group;
};
+struct cn_ctl_entry {
+ struct list_head notify_entry;
+ struct cn_ctl_msg *msg;
+};
+
struct cn_dev {
struct cb_id id;
#define num_online_cpus() cpumask_weight(cpu_online_mask)
#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
#define num_present_cpus() cpumask_weight(cpu_present_mask)
-#define num_active_cpus() cpumask_weight(cpu_active_mask)
#define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask)
#define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask)
#define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask)
#define num_online_cpus() 1
#define num_possible_cpus() 1
#define num_present_cpus() 1
-#define num_active_cpus() 1
#define cpu_online(cpu) ((cpu) == 0)
#define cpu_possible(cpu) ((cpu) == 0)
#define cpu_present(cpu) ((cpu) == 0)
ENCLOSURE_STATUS_NOT_INSTALLED,
ENCLOSURE_STATUS_UNKNOWN,
ENCLOSURE_STATUS_UNAVAILABLE,
- /* last element for counting purposes */
- ENCLOSURE_STATUS_MAX
};
/* SFF-8485 activity light settings */
extern int generic_readlink(struct dentry *, char __user *, int);
extern void generic_fillattr(struct inode *, struct kstat *);
extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
-void __inode_add_bytes(struct inode *inode, loff_t bytes);
void inode_add_bytes(struct inode *inode, loff_t bytes);
void inode_sub_bytes(struct inode *inode, loff_t bytes);
loff_t inode_get_bytes(struct inode *inode);
#define HID_QUIRK_MULTI_INPUT 0x00000040
#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
#define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000
-#define HID_QUIRK_NO_INIT_REPORTS 0x20000000
/*
* This is the global environment of the parser. This information is
static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
{
- if (likely(!timer_stats_active))
+ if (likely(!timer->start_site))
return;
timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
timer->function, timer->start_comm, 0);
static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
{
+ if (likely(!timer_stats_active))
+ return;
__timer_stats_hrtimer_set_start_info(timer, __builtin_return_address(0));
}
#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING)
#define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING)
#define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER)
-#define IN_DEV_SRC_VMARK(in_dev) IN_DEV_ORCONF((in_dev), SRC_VMARK)
#define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \
ACCEPT_SOURCE_ROUTE)
#define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY)
__u64 cr8;
__u64 apic_base;
-#ifdef __KVM_S390
- /* the processor status word for s390 */
- __u64 psw_mask; /* psw upper half */
- __u64 psw_addr; /* psw lower half */
-#endif
union {
/* KVM_EXIT_UNKNOWN */
struct {
/* KVM_EXIT_S390_SIEIC */
struct {
__u8 icptcode;
+ __u64 mask; /* psw upper half */
+ __u64 addr; /* psw lower half */
__u16 ipa;
__u32 ipb;
} s390_sieic;
#endif
#define KVM_CAP_IOEVENTFD 36
#define KVM_CAP_SET_IDENTITY_MAP_ADDR 37
-#define KVM_CAP_ADJUST_CLOCK 39
#ifdef KVM_CAP_IRQ_ROUTING
};
#endif
-#define KVM_CAP_S390_PSW 42
#ifdef KVM_CAP_MCE
/* x86 MCE */
__u8 pad[20];
};
-struct kvm_clock_data {
- __u64 clock;
- __u32 flags;
- __u32 pad[9];
-};
-
/*
* ioctls for VM fds
*/
#define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config)
#define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78)
#define KVM_IOEVENTFD _IOW(KVMIO, 0x79, struct kvm_ioeventfd)
-#define KVM_SET_CLOCK _IOW(KVMIO, 0x7b, struct kvm_clock_data)
-#define KVM_GET_CLOCK _IOR(KVMIO, 0x7c, struct kvm_clock_data)
/*
* ioctls for vcpu fds
/* max tries if error condition is still set after ->error_handler */
ATA_EH_MAX_TRIES = 5,
- /* sometimes resuming a link requires several retries */
- ATA_LINK_RESUME_TRIES = 5,
-
/* how hard are we gonna try to probe/recover devices */
ATA_PROBE_MAX_TRIES = 3,
ATA_EH_DEV_TRIES = 3,
#define WM8350_ISINK_FLASH_DUR_64MS (1 << 8)
#define WM8350_ISINK_FLASH_DUR_96MS (2 << 8)
#define WM8350_ISINK_FLASH_DUR_1024MS (3 << 8)
-#define WM8350_ISINK_FLASH_ON_INSTANT (0 << 0)
-#define WM8350_ISINK_FLASH_ON_0_25S (1 << 0)
-#define WM8350_ISINK_FLASH_ON_0_50S (2 << 0)
-#define WM8350_ISINK_FLASH_ON_1_00S (3 << 0)
-#define WM8350_ISINK_FLASH_ON_1_95S (1 << 0)
-#define WM8350_ISINK_FLASH_ON_3_91S (2 << 0)
-#define WM8350_ISINK_FLASH_ON_7_80S (3 << 0)
-#define WM8350_ISINK_FLASH_OFF_INSTANT (0 << 4)
-#define WM8350_ISINK_FLASH_OFF_0_25S (1 << 4)
-#define WM8350_ISINK_FLASH_OFF_0_50S (2 << 4)
-#define WM8350_ISINK_FLASH_OFF_1_00S (3 << 4)
-#define WM8350_ISINK_FLASH_OFF_1_95S (1 << 4)
-#define WM8350_ISINK_FLASH_OFF_3_91S (2 << 4)
-#define WM8350_ISINK_FLASH_OFF_7_80S (3 << 4)
+#define WM8350_ISINK_FLASH_ON_INSTANT (0 << 4)
+#define WM8350_ISINK_FLASH_ON_0_25S (1 << 4)
+#define WM8350_ISINK_FLASH_ON_0_50S (2 << 4)
+#define WM8350_ISINK_FLASH_ON_1_00S (3 << 4)
+#define WM8350_ISINK_FLASH_ON_1_95S (1 << 4)
+#define WM8350_ISINK_FLASH_ON_3_91S (2 << 4)
+#define WM8350_ISINK_FLASH_ON_7_80S (3 << 4)
+#define WM8350_ISINK_FLASH_OFF_INSTANT (0 << 0)
+#define WM8350_ISINK_FLASH_OFF_0_25S (1 << 0)
+#define WM8350_ISINK_FLASH_OFF_0_50S (2 << 0)
+#define WM8350_ISINK_FLASH_OFF_1_00S (3 << 0)
+#define WM8350_ISINK_FLASH_OFF_1_95S (1 << 0)
+#define WM8350_ISINK_FLASH_OFF_3_91S (2 << 0)
+#define WM8350_ISINK_FLASH_OFF_7_80S (3 << 0)
/*
* Regulator Interrupts.
extern struct page * read_cache_page(struct address_space *mapping,
pgoff_t index, filler_t *filler,
void *data);
-extern struct page * read_cache_page_gfp(struct address_space *mapping,
- pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
struct list_head *pages, filler_t *filler, void *data);
resource_size_t);
void pcibios_update_irq(struct pci_dev *, int irq);
-/* Weak but can be overriden by arch */
-void pci_fixup_cardbus(struct pci_bus *);
-
/* Generic PCI functions used internally */
extern struct pci_bus *pci_find_bus(int domain, int busnr);
#define PCI_DEVICE_ID_MPC8536 0x0051
#define PCI_DEVICE_ID_P2020E 0x0070
#define PCI_DEVICE_ID_P2020 0x0071
-#define PCI_DEVICE_ID_P2010E 0x0078
-#define PCI_DEVICE_ID_P2010 0x0079
-#define PCI_DEVICE_ID_P1020E 0x0100
-#define PCI_DEVICE_ID_P1020 0x0101
-#define PCI_DEVICE_ID_P1011E 0x0108
-#define PCI_DEVICE_ID_P1011 0x0109
-#define PCI_DEVICE_ID_P1022E 0x0110
-#define PCI_DEVICE_ID_P1022 0x0111
-#define PCI_DEVICE_ID_P1013E 0x0118
-#define PCI_DEVICE_ID_P1013 0x0119
-#define PCI_DEVICE_ID_P4080E 0x0400
-#define PCI_DEVICE_ID_P4080 0x0401
-#define PCI_DEVICE_ID_P4040E 0x0408
-#define PCI_DEVICE_ID_P4040 0x0409
#define PCI_DEVICE_ID_MPC8641 0x7010
#define PCI_DEVICE_ID_MPC8641D 0x7011
#define PCI_DEVICE_ID_MPC8610 0x7018
#define PERF_EVENT_IOC_DISABLE _IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH _IO ('$', 2)
#define PERF_EVENT_IOC_RESET _IO ('$', 3)
-#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64)
+#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, u64)
#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
enum perf_event_ioc_flags {
int (*claim_space) (struct inode *, qsize_t);
/* release rsved quota for delayed alloc */
void (*release_rsv) (struct inode *, qsize_t);
- /* get reserved quota for delayed alloc, value returned is managed by
- * quota code only */
- qsize_t *(*get_reserved_space) (struct inode *);
+ /* get reserved quota for delayed alloc */
+ qsize_t (*get_reserved_space) (struct inode *);
};
/* Operations handling requests from userspace */
char comm[TASK_COMM_LEN]; /* executable name excluding path
- access with [gs]et_task_comm (which lock
it with task_lock())
- - initialized normally by setup_new_exec */
+ - initialized normally by flush_old_exec */
/* file system info */
int link_count, total_link_count;
#ifdef CONFIG_SYSVIPC
return info <= SEND_SIG_FORCED;
}
-/*
- * True if we are on the alternate signal stack.
- */
+/* True if we are on the alternate signal stack. */
+
static inline int on_sig_stack(unsigned long sp)
{
-#ifdef CONFIG_STACK_GROWSUP
- return sp >= current->sas_ss_sp &&
- sp - current->sas_ss_sp < current->sas_ss_size;
-#else
- return sp > current->sas_ss_sp &&
- sp - current->sas_ss_sp <= current->sas_ss_size;
-#endif
+ return (sp - current->sas_ss_sp < current->sas_ss_size);
}
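The region being tested is the one userspace installs with sigaltstack(2);
a minimal userspace sketch of that setup (not part of this patch; needs
<signal.h> and <stdlib.h>):

	stack_t ss = {
		.ss_sp    = malloc(SIGSTKSZ),	/* alternate stack memory */
		.ss_size  = SIGSTKSZ,
		.ss_flags = 0,
	};
	sigaltstack(&ss, NULL);
	/* a handler installed with SA_ONSTACK now runs with sp inside
	 * (ss_sp, ss_sp + ss_size], which is what on_sig_stack() tests */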
static inline int sas_ss_flags(unsigned long sp)
#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
-static inline unsigned long task_rlimit(const struct task_struct *tsk,
- unsigned int limit)
-{
- return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
-}
-
-static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
- unsigned int limit)
-{
- return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
-}
-
-static inline unsigned long rlimit(unsigned int limit)
-{
- return task_rlimit(current, limit);
-}
-
-static inline unsigned long rlimit_max(unsigned int limit)
-{
- return task_rlimit_max(current, limit);
-}
-
#endif /* __KERNEL__ */
#endif
extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
extern int cap_netlink_recv(struct sk_buff *skb, int cap);
-#ifdef CONFIG_MMU
extern unsigned long mmap_min_addr;
extern unsigned long dac_mmap_min_addr;
-#else
-#define dac_mmap_min_addr 0UL
-#endif
-
/*
* Values used in the task_security_ops calls
*/
#define LSM_UNSAFE_PTRACE 2
#define LSM_UNSAFE_PTRACE_CAP 4
-#ifdef CONFIG_MMU
/*
* If a hint addr is less than mmap_min_addr change hint to be as
* low as possible but still greater than mmap_min_addr
}
extern int mmap_min_addr_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
-#endif
#ifdef CONFIG_SECURITY
asmlinkage long sys_perf_event_open(
struct perf_event_attr __user *attr_uptr,
pid_t pid, int cpu, int group_fd, unsigned long flags);
-
-asmlinkage long sys_mmap_pgoff(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff);
#endif
NET_IPV4_CONF_PROMOTE_SECONDARIES=20,
NET_IPV4_CONF_ARP_ACCEPT=21,
NET_IPV4_CONF_ARP_NOTIFY=22,
- NET_IPV4_CONF_SRC_VMARK=24,
__NET_IPV4_CONF_MAX
};
extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
extern int timekeeping_valid_for_hres(void);
-extern u64 timekeeping_max_deferment(void);
extern void update_wall_time(void);
extern void update_xtime_cache(u64 nsec);
extern void timekeeping_leap_insert(int leapsecond);
US_FLAG(SANE_SENSE, 0x00008000) \
/* Sane Sense (> 18 bytes) */ \
US_FLAG(CAPACITY_OK, 0x00010000) \
- /* READ CAPACITY response is correct */ \
- US_FLAG(BAD_SENSE, 0x00020000) \
- /* Bad Sense (never more than 18 bytes) */
+ /* READ CAPACITY response is correct */
#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };
extern struct vm_struct *vmlist;
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
-#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
const size_t *sizes, int nr_vms,
size_t align, gfp_t gfp_mask);
-#endif
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
IP_DEFRAG_CALL_RA_CHAIN,
IP_DEFRAG_CONNTRACK_IN,
IP_DEFRAG_CONNTRACK_OUT,
- IP_DEFRAG_CONNTRACK_BRIDGE_IN,
IP_DEFRAG_VS_IN,
IP_DEFRAG_VS_OUT,
IP_DEFRAG_VS_FWD
struct inet_frag_queue;
-enum ip6_defrag_users {
- IP6_DEFRAG_LOCAL_DELIVER,
- IP6_DEFRAG_CONNTRACK_IN,
- IP6_DEFRAG_CONNTRACK_OUT,
- IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
-};
-
struct ip6_create_arg {
__be32 id;
- u32 user;
struct in6_addr *src;
struct in6_addr *dst;
};
extern int nf_ct_frag6_init(void);
extern void nf_ct_frag6_cleanup(void);
-extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
+extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb);
extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
struct net_device *in,
struct net_device *out,
struct netns_ct {
atomic_t count;
unsigned int expect_count;
- unsigned int htable_size;
- struct kmem_cache *nf_conntrack_cachep;
struct hlist_nulls_head *hash;
struct hlist_head *expect_hash;
struct hlist_nulls_head unconfirmed;
#endif
int hash_vmalloc;
int expect_vmalloc;
- char *slabname;
};
#endif
struct xt_table *iptable_security;
struct xt_table *nat_table;
struct hlist_head *nat_bysource;
- unsigned int nat_htable_size;
int nat_vmalloced;
#endif
static __inline__ void nr_neigh_put(struct nr_neigh *nr_neigh)
{
if (atomic_dec_and_test(&nr_neigh->refcount)) {
- if (nr_neigh->ax25)
- ax25_cb_put(nr_neigh->ax25);
kfree(nr_neigh->digipeat);
kfree(nr_neigh);
}
* TCP connection after "boundary" unsuccessful, exponentially backed-off
* retransmissions with an initial RTO of TCP_RTO_MIN.
*/
-static inline bool retransmits_timed_out(struct sock *sk,
+static inline bool retransmits_timed_out(const struct sock *sk,
unsigned int boundary)
{
unsigned int timeout, linear_backoff_thresh;
- unsigned int start_ts;
if (!inet_csk(sk)->icsk_retransmits)
return false;
- if (unlikely(!tcp_sk(sk)->retrans_stamp))
- start_ts = TCP_SKB_CB(tcp_write_queue_head(sk))->when;
- else
- start_ts = tcp_sk(sk)->retrans_stamp;
-
linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN);
if (boundary <= linear_backoff_thresh)
	timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;
else
	timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
		(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
- return (tcp_time_stamp - start_ts) >= timeout;
+ return (tcp_time_stamp - tcp_sk(sk)->retrans_stamp) >= timeout;
}
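The closed form is the geometric series of doubling RTOs: retries 0..boundary
cost TCP_RTO_MIN * (1 + 2 + ... + 2^boundary) = ((2 << boundary) - 1) *
TCP_RTO_MIN until the per-retry RTO saturates at TCP_RTO_MAX, after which
each further retry adds a flat TCP_RTO_MAX; linear_backoff_thresh =
ilog2(TCP_RTO_MAX/TCP_RTO_MIN) marks that crossover.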
static inline struct sk_buff *tcp_send_head(struct sock *sk)
#define FC_FRAME_HEADROOM 32 /* headroom for VLAN + FCoE headers */
#define FC_FRAME_TAILROOM 8 /* trailer space for FCoE */
-/* Max number of skb frags allowed, reserving one for fcoe_crc_eof page */
-#define FC_FRAME_SG_LEN (MAX_SKB_FRAGS - 1)
-
#define fp_skb(fp) (&((fp)->skb))
#define fr_hdr(fp) ((fp)->skb.data)
#define fr_len(fp) ((fp)->skb.len)
RPORT_ST_LOGO, /* port logout sent */
RPORT_ST_ADISC, /* Discover Address sent */
RPORT_ST_DELETE, /* port being deleted */
- RPORT_ST_RESTART, /* remote port being deleted and will restart */
};
/**
#define __OSD_PROTOCOL_H__
#include <linux/types.h>
-#include <linux/kernel.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
*/
void *shost_data;
- /*
- * Points to the physical bus device we'd use to do DMA
- * Needed just in case we have virtual hosts.
- */
- struct device *dma_dev;
-
/*
* We should ensure that this is aligned, both for better performance
* and also because some compilers (m68k) don't automatically force
extern void scsi_flush_work(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
-extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
- struct device *,
- struct device *);
+extern int __must_check scsi_add_host(struct Scsi_Host *, struct device *);
extern void scsi_scan_host(struct Scsi_Host *);
extern void scsi_rescan_device(struct device *);
extern void scsi_remove_host(struct Scsi_Host *);
extern u64 scsi_calculate_bounce_limit(struct Scsi_Host *);
-static inline int __must_check scsi_add_host(struct Scsi_Host *host,
- struct device *dev)
-{
- return scsi_add_host_with_dma(host, dev, dev);
-}
-
static inline struct device *scsi_get_device(struct Scsi_Host *shost)
{
return shost->shost_gendev.parent;
#undef __get_str
#undef TP_printk
-#define TP_printk(fmt, args...) "\"%s\", %s\n", fmt, __stringify(args)
+#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)
#undef TP_fast_assign
#define TP_fast_assign(args...) args
{
unsigned long ticks, loopbit;
int lps_precision = LPS_PREC;
- static bool printed;
if (preset_lpj) {
loops_per_jiffy = preset_lpj;
- if (!printed)
- pr_info("Calibrating delay loop (skipped) "
- "preset value.. ");
- } else if ((!printed) && lpj_fine) {
+ printk(KERN_INFO
+ "Calibrating delay loop (skipped) preset value.. ");
+ } else if ((smp_processor_id() == 0) && lpj_fine) {
loops_per_jiffy = lpj_fine;
- pr_info("Calibrating delay loop (skipped), "
+ printk(KERN_INFO
+ "Calibrating delay loop (skipped), "
"value calculated using timer frequency.. ");
} else if ((loops_per_jiffy = calibrate_delay_direct()) != 0) {
- if (!printed)
- pr_info("Calibrating delay using timer "
- "specific routine.. ");
+ printk(KERN_INFO
+ "Calibrating delay using timer specific routine.. ");
} else {
loops_per_jiffy = (1<<12);
- if (!printed)
- pr_info("Calibrating delay loop... ");
+ printk(KERN_INFO "Calibrating delay loop... ");
while ((loops_per_jiffy <<= 1) != 0) {
/* wait for "start of" clock tick */
ticks = jiffies;
loops_per_jiffy &= ~loopbit;
}
}
- if (!printed)
- pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
+ printk(KERN_CONT "%lu.%02lu BogoMIPS (lpj=%lu)\n",
loops_per_jiffy/(500000/HZ),
(loops_per_jiffy/(5000/HZ)) % 100, loops_per_jiffy);
-
- printed = true;
}
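The printed figure is loops_per_jiffy scaled to bogus MIPS: with illustrative
values HZ = 1000 and loops_per_jiffy = 2994176, the integer part is
2994176 / (500000/1000) = 5988 and the fractional part
(2994176 / (5000/1000)) % 100 = 35, so the boot log would read
"5988.35 BogoMIPS (lpj=2994176)".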
void msg_exit_ns(struct ipc_namespace *ns)
{
free_ipcs(ns, &msg_ids(ns), freeque);
- idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
}
#endif
void sem_exit_ns(struct ipc_namespace *ns)
{
free_ipcs(ns, &sem_ids(ns), freeary);
- idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
#endif
void shm_exit_ns(struct ipc_namespace *ns)
{
free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
- idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif
unsigned long flags)
{
struct shm_file_data *sfd = shm_file_data(file);
- return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
- pgoff, flags);
+ return get_unmapped_area(sfd->file, addr, len, pgoff, flags);
}
-static const struct file_operations shm_file_operations = {
- .mmap = shm_mmap,
- .fsync = shm_fsync,
- .release = shm_release,
-};
+int is_file_shm_hugepages(struct file *file)
+{
+ int ret = 0;
+
+ if (file->f_op == &shm_file_operations) {
+ struct shm_file_data *sfd;
+ sfd = shm_file_data(file);
+ ret = is_file_hugepages(sfd->file);
+ }
+ return ret;
+}
-static const struct file_operations shm_file_operations_huge = {
+static const struct file_operations shm_file_operations = {
.mmap = shm_mmap,
.fsync = shm_fsync,
.release = shm_release,
.get_unmapped_area = shm_get_unmapped_area,
};
-int is_file_shm_hugepages(struct file *file)
-{
- return file->f_op == &shm_file_operations_huge;
-}
-
static const struct vm_operations_struct shm_vm_ops = {
.open = shm_open, /* callback for a new vm-area open */
.close = shm_close, /* callback for when the vm-area is released */
if (!sfd)
goto out_put_dentry;
- file = alloc_file(path.mnt, path.dentry, f_mode,
- is_file_hugepages(shp->shm_file) ?
- &shm_file_operations_huge :
- &shm_file_operations);
+ file = alloc_file(path.mnt, path.dentry, f_mode, &shm_file_operations);
if (!file)
goto out_free;
ima_counts_get(file);
do_div(elapsed, AHZ);
ac.ac_btime = get_seconds() - elapsed;
/* we really need to bite the bullet and change layout */
- ac.ac_uid = orig_cred->uid;
- ac.ac_gid = orig_cred->gid;
+ current_uid_gid(&ac.ac_uid, &ac.ac_gid);
#if ACCT_VERSION==2
ac.ac_ahz = AHZ;
#endif
owner->root = NULL;
}
- for (i = j = 0; j <= size; i++, j++) {
+ for (i = j = 0; i < size; i++, j++) {
struct audit_tree *s;
if (&chunk->owners[j] == p) {
list_del_init(&p->list);
if (!s) /* result of earlier fallback */
continue;
get_tree(s);
- list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
+ list_replace_init(&chunk->owners[i].list, &new->owners[j].list);
}
list_replace_rcu(&chunk->hash, &new->hash);
for (n = 0; n < old->count; n++) {
if (old->owners[n].owner == tree) {
spin_unlock(&hash_lock);
- put_inotify_watch(&old->watch);
+ put_inotify_watch(watch);
return 0;
}
}
spin_unlock(&hash_lock);
chunk = alloc_chunk(old->count + 1);
- if (!chunk) {
- put_inotify_watch(&old->watch);
+ if (!chunk)
return -ENOMEM;
- }
mutex_lock(&inode->inotify_mutex);
if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
spin_unlock(&hash_lock);
inotify_evict_watch(&old->watch);
mutex_unlock(&inode->inotify_mutex);
- put_inotify_watch(&old->watch); /* pair to inotify_find_watch */
- put_inotify_watch(&old->watch); /* and kill it */
+ put_inotify_watch(&old->watch);
return 0;
}
/* make sure l doesn't vanish out from under us */
down_write(&l->mutex);
mutex_unlock(&cgrp->pidlist_mutex);
+ l->use_count++;
return l;
}
}
err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
hcpu, -1, &nr_calls);
if (err == NOTIFY_BAD) {
- set_cpu_active(cpu, true);
-
nr_calls--;
__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
hcpu, nr_calls, NULL);
/* Ensure that we are not runnable on dying cpu */
cpumask_copy(old_allowed, &current->cpus_allowed);
- set_cpus_allowed_ptr(current, cpu_active_mask);
+ set_cpus_allowed_ptr(current,
+ cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));
err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
if (err) {
- set_cpu_active(cpu, true);
/* CPU didn't die: tell everyone. Can't complain. */
if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
hcpu) == NOTIFY_BAD)
err = _cpu_down(cpu, 0);
+ if (cpu_online(cpu))
+ set_cpu_active(cpu, true);
+
out:
cpu_maps_update_done();
stop_machine_destroy();
* with the userspace trying to use the CPU hotplug at the same time
*/
cpumask_clear(frozen_cpus);
-
- for_each_online_cpu(cpu) {
- if (cpu == first_cpu)
- continue;
- set_cpu_active(cpu, false);
- }
-
- synchronize_sched();
-
printk("Disabling non-boot CPUs ...\n");
for_each_online_cpu(cpu) {
if (cpu == first_cpu)
continue;
error = _cpu_down(cpu, 1);
- if (!error)
+ if (!error) {
cpumask_set_cpu(cpu, frozen_cpus);
- else {
+ printk("CPU%d is down\n", cpu);
+ } else {
printk(KERN_ERR "Error taking CPU%d down: %d\n",
cpu, error);
break;
if (retval < 0)
return retval;
- if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
+ if (!cpumask_subset(trialcs->cpus_allowed, cpu_online_mask))
return -EINVAL;
}
retval = validate_change(cs, trialcs);
}
/* Continue past cpusets with all cpus, mems online */
- if (cpumask_subset(cp->cpus_allowed, cpu_active_mask) &&
+ if (cpumask_subset(cp->cpus_allowed, cpu_online_mask) &&
nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
continue;
/* Remove offline cpus and mems from this cpuset. */
mutex_lock(&callback_mutex);
cpumask_and(cp->cpus_allowed, cp->cpus_allowed,
- cpu_active_mask);
+ cpu_online_mask);
nodes_and(cp->mems_allowed, cp->mems_allowed,
node_states[N_HIGH_MEMORY]);
mutex_unlock(&callback_mutex);
switch (phase) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- case CPU_DOWN_FAILED:
- case CPU_DOWN_FAILED_FROZEN:
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
break;
default:
cgroup_lock();
mutex_lock(&callback_mutex);
- cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
+ cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
mutex_unlock(&callback_mutex);
scan_for_empty_cpusets(&top_cpuset);
ndoms = generate_sched_domains(&doms, &attr);
void __init cpuset_init_smp(void)
{
- cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
+ cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
hotcpu_notifier(cpuset_track_online_cpus, 0);
#ifdef CONFIG_KEYS
new->tgcred = kzalloc(sizeof(*new->tgcred), GFP_KERNEL);
if (!new->tgcred) {
- kmem_cache_free(cred_jar, new);
+ kfree(new);
return NULL;
}
atomic_set(&new->tgcred->usage, 1);
* @uaddr: virtual address of the futex
* @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
* @key: address where result is stored.
+ * @rw: mapping needs to be read/write (values: VERIFY_READ,
+ * VERIFY_WRITE)
*
* Returns a negative error code or 0
* The key words are stored in *key on success.
* lock_page() might sleep, the caller should not hold a spinlock.
*/
static int
-get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
+get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
{
unsigned long address = (unsigned long)uaddr;
struct mm_struct *mm = current->mm;
* but access_ok() should be faster than find_vma()
*/
if (!fshared) {
- if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
+ if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
return -EFAULT;
key->private.mm = mm;
key->private.address = address;
}
again:
- err = get_user_pages_fast(address, 1, 1, &page);
+ err = get_user_pages_fast(address, 1, rw == VERIFY_WRITE, &page);
if (err < 0)
return err;
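
A minimal userspace sketch (not from this patch; assumes Linux and glibc) of why a plain waiter needs only read access to the futex word: FUTEX_WAIT merely loads and compares the word, so VERIFY_READ suffices, while operations that must store into the word need VERIFY_WRITE. The mapping below is read-only and the expected value is deliberately wrong, so the call returns immediately instead of sleeping:

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/futex.h>

int main(void)
{
	/* A shared mapping the process can only read: enough for
	 * FUTEX_WAIT, which only loads the futex word, but not for
	 * operations that store to it (e.g. FUTEX_LOCK_PI). */
	int *uaddr = mmap(NULL, sizeof(int), PROT_READ,
			  MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (uaddr == MAP_FAILED)
		return 1;

	/* The word reads as 0; wait for 1 so the call returns
	 * EWOULDBLOCK immediately instead of blocking. */
	long ret = syscall(SYS_futex, uaddr, FUTEX_WAIT, 1, NULL, NULL, 0);
	printf("FUTEX_WAIT on read-only mapping: ret=%ld (%s)\n",
	       ret, ret ? strerror(errno) : "ok");
	return 0;
}
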
*/
static int fault_in_user_writeable(u32 __user *uaddr)
{
- struct mm_struct *mm = current->mm;
- int ret;
-
- down_read(&mm->mmap_sem);
- ret = get_user_pages(current, mm, (unsigned long)uaddr,
- 1, 1, 0, NULL, NULL);
- up_read(&mm->mmap_sem);
-
+ int ret = get_user_pages(current, current->mm, (unsigned long)uaddr,
+ 1, 1, 0, NULL, NULL);
return ret < 0 ? ret : 0;
}
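
fault_in_user_writeable() lets the futex code take the write fault in a context where sleeping is allowed, then retry the atomic access. A loose userspace analogue (assumed mmap parameters, not from this patch) of pre-faulting a page for write before a fault-intolerant path touches it:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	int *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* Take the write fault now, while blocking is fine, so a later
	 * access that must not fault finds the page present and
	 * writable. */
	*(volatile int *)p = 0;

	printf("pre-faulted page at %p\n", (void *)p);
	return 0;
}
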
return -EINVAL;
WARN_ON(!atomic_read(&pi_state->refcount));
-
- /*
- * When pi_state->owner is NULL then the owner died
- * and another waiter is on the fly. pi_state->owner
- * is fixed up by the task which acquires
- * pi_state->rt_mutex.
- *
- * We do not check for pid == 0 which can happen when
- * the owner died and robust_list_exit() cleared the
- * TID.
- */
- if (pid && pi_state->owner) {
- /*
- * Bail out if user space manipulated the
- * futex value.
- */
- if (pid != task_pid_vnr(pi_state->owner))
- return -EINVAL;
- }
+ WARN_ON(pid && pi_state->owner &&
+ pi_state->owner->pid != pid);
atomic_inc(&pi_state->refcount);
*ps = pi_state;
if (!pi_state)
return -EINVAL;
- /*
- * If current does not own the pi_state then the futex is
- * inconsistent and user space fiddled with the futex value.
- */
- if (pi_state->owner != current)
- return -EINVAL;
-
spin_lock(&pi_state->pi_mutex.wait_lock);
new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
if (!bitset)
return -EINVAL;
- ret = get_futex_key(uaddr, fshared, &key);
+ ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
int ret, op_ret;
retry:
- ret = get_futex_key(uaddr1, fshared, &key1);
+ ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
- ret = get_futex_key(uaddr2, fshared, &key2);
+ ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out_put_key1;
pi_state = NULL;
}
- ret = get_futex_key(uaddr1, fshared, &key1);
+ ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
- ret = get_futex_key(uaddr2, fshared, &key2);
+ ret = get_futex_key(uaddr2, fshared, &key2,
+ requeue_pi ? VERIFY_WRITE : VERIFY_READ);
if (unlikely(ret != 0))
goto out_put_key1;
*/
retry:
q->key = FUTEX_KEY_INIT;
- ret = get_futex_key(uaddr, fshared, &q->key);
+ ret = get_futex_key(uaddr, fshared, &q->key, VERIFY_READ);
if (unlikely(ret != 0))
return ret;
q.requeue_pi_key = NULL;
retry:
q.key = FUTEX_KEY_INIT;
- ret = get_futex_key(uaddr, fshared, &q.key);
+ ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
/* Unqueue and drop the lock */
unqueue_me_pi(&q);
- goto out_put_key;
+ goto out;
out_unlock_put_key:
queue_unlock(&q, hb);
if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
return -EPERM;
- ret = get_futex_key(uaddr, fshared, &key);
+ ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
rt_waiter.task = NULL;
key2 = FUTEX_KEY_INIT;
- ret = get_futex_key(uaddr2, fshared, &key2);
+ ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
}
#ifdef CONFIG_MODVERSIONS
-/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
-static unsigned long maybe_relocated(unsigned long crc,
- const struct module *crc_owner)
-{
-#ifdef ARCH_RELOCATES_KCRCTAB
- if (crc_owner == NULL)
- return crc - (unsigned long)reloc_start;
-#endif
- return crc;
-}
-
static int check_version(Elf_Shdr *sechdrs,
unsigned int versindex,
const char *symname,
struct module *mod,
- const unsigned long *crc,
- const struct module *crc_owner)
+ const unsigned long *crc)
{
unsigned int i, num_versions;
struct modversion_info *versions;
if (strcmp(versions[i].name, symname) != 0)
continue;
- if (versions[i].crc == maybe_relocated(*crc, crc_owner))
+ if (versions[i].crc == *crc)
return 1;
DEBUGP("Found checksum %lX vs module %lX\n",
- maybe_relocated(*crc, crc_owner), versions[i].crc);
+ *crc, versions[i].crc);
goto bad_version;
}
if (!find_symbol(MODULE_SYMBOL_PREFIX "module_layout", NULL,
&crc, true, false))
BUG();
- return check_version(sechdrs, versindex, "module_layout", mod, crc,
- NULL);
+ return check_version(sechdrs, versindex, "module_layout", mod, crc);
}
/* First part is kernel version, which we ignore if module has crcs. */
unsigned int versindex,
const char *symname,
struct module *mod,
- const unsigned long *crc,
- const struct module *crc_owner)
+ const unsigned long *crc)
{
return 1;
}
/* use_module can fail due to OOM,
or module initialization or unloading */
if (sym) {
- if (!check_version(sechdrs, versindex, name, mod, crc, owner)
- || !use_module(mod, owner))
+ if (!check_version(sechdrs, versindex, name, mod, crc) ||
+ !use_module(mod, owner))
sym = NULL;
}
return sym;
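
The CRC comparison above is mechanical; a toy userspace model may help (the struct and loop mirror the code above, but the symbol name and CRC values are invented, and real modules record these pairs at build time in __versions):

#include <stdio.h>
#include <string.h>

struct modversion_info { unsigned long crc; char name[24]; };

static int check_version_toy(const struct modversion_info *versions,
			     unsigned int num, const char *symname,
			     unsigned long crc)
{
	for (unsigned int i = 0; i < num; i++) {
		if (strcmp(versions[i].name, symname) != 0)
			continue;
		if (versions[i].crc == crc)
			return 1;
		printf("Found checksum %lX vs module %lX\n",
		       crc, versions[i].crc);
		return 0;
	}
	return 1;	/* symbol not versioned: not fatal here */
}

int main(void)
{
	struct modversion_info v[] = { { 0xdeadbeef, "printk" } };

	printf("match: %d\n", check_version_toy(v, 1, "printk", 0xdeadbeef));
	printf("mismatch: %d\n", check_version_toy(v, 1, "printk", 0x1234));
	return 0;
}
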
* J. Corbet <corbet@lwn.net>
*/
#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
-
-static inline bool sect_empty(const Elf_Shdr *sect)
-{
- return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
-}
-
struct module_sect_attr
{
struct module_attribute mattr;
/* Count loaded sections and allocate structures */
for (i = 0; i < nsect; i++)
- if (!sect_empty(&sechdrs[i]))
+ if (sechdrs[i].sh_flags & SHF_ALLOC
+ && sechdrs[i].sh_size)
nloaded++;
size[0] = ALIGN(sizeof(*sect_attrs)
+ nloaded * sizeof(sect_attrs->attrs[0]),
sattr = &sect_attrs->attrs[0];
gattr = &sect_attrs->grp.attrs[0];
for (i = 0; i < nsect; i++) {
- if (sect_empty(&sechdrs[i]))
+ if (! (sechdrs[i].sh_flags & SHF_ALLOC))
+ continue;
+ if (!sechdrs[i].sh_size)
continue;
sattr->address = sechdrs[i].sh_addr;
sattr->name = kstrdup(secstrings + sechdrs[i].sh_name,
/* Count notes sections and allocate structures. */
notes = 0;
for (i = 0; i < nsect; i++)
- if (!sect_empty(&sechdrs[i]) &&
+ if ((sechdrs[i].sh_flags & SHF_ALLOC) &&
(sechdrs[i].sh_type == SHT_NOTE))
++notes;
notes_attrs->notes = notes;
nattr = &notes_attrs->attrs[0];
for (loaded = i = 0; i < nsect; ++i) {
- if (sect_empty(&sechdrs[i]))
+ if (!(sechdrs[i].sh_flags & SHF_ALLOC))
continue;
if (sechdrs[i].sh_type == SHT_NOTE) {
nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
if (event->state != PERF_EVENT_STATE_ACTIVE)
continue;
- if (event->cpu != -1 && event->cpu != smp_processor_id())
- continue;
-
hwc = &event->hw;
interrupts = hwc->interrupts;
if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
return ERR_PTR(-EACCES);
- if (cpu < 0 || cpu >= nr_cpumask_bits)
+ if (cpu < 0 || cpu > num_possible_cpus())
return ERR_PTR(-EINVAL);
/*
perf_mmap_free_page((unsigned long)data->user_page);
for (i = 0; i < data->nr_pages; i++)
perf_mmap_free_page((unsigned long)data->data_pages[i]);
- kfree(data);
}
#else
perf_mmap_unmark_page(base + (i * PAGE_SIZE));
vfree(base);
- kfree(data);
}
static void perf_mmap_data_free(struct perf_mmap_data *data)
data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
perf_mmap_data_free(data);
+ kfree(data);
}
static void perf_mmap_data_release(struct perf_event *event)
static int perf_event_task_match(struct perf_event *event)
{
- if (event->state != PERF_EVENT_STATE_ACTIVE)
- return 0;
-
- if (event->cpu != -1 && event->cpu != smp_processor_id())
- return 0;
-
if (event->attr.comm || event->attr.mmap || event->attr.task)
return 1;
cpuctx = &get_cpu_var(perf_cpu_context);
perf_event_task_ctx(&cpuctx->ctx, task_event);
+ put_cpu_var(perf_cpu_context);
rcu_read_lock();
if (!ctx)
ctx = rcu_dereference(task_event->task->perf_event_ctxp);
if (ctx)
perf_event_task_ctx(ctx, task_event);
- put_cpu_var(perf_cpu_context);
rcu_read_unlock();
}
static int perf_event_comm_match(struct perf_event *event)
{
- if (event->state != PERF_EVENT_STATE_ACTIVE)
- return 0;
-
- if (event->cpu != -1 && event->cpu != smp_processor_id())
- return 0;
-
if (event->attr.comm)
return 1;
cpuctx = &get_cpu_var(perf_cpu_context);
perf_event_comm_ctx(&cpuctx->ctx, comm_event);
+ put_cpu_var(perf_cpu_context);
rcu_read_lock();
/*
ctx = rcu_dereference(current->perf_event_ctxp);
if (ctx)
perf_event_comm_ctx(ctx, comm_event);
- put_cpu_var(perf_cpu_context);
rcu_read_unlock();
}
static int perf_event_mmap_match(struct perf_event *event,
struct perf_mmap_event *mmap_event)
{
- if (event->state != PERF_EVENT_STATE_ACTIVE)
- return 0;
-
- if (event->cpu != -1 && event->cpu != smp_processor_id())
- return 0;
-
if (event->attr.mmap)
return 1;
cpuctx = &get_cpu_var(perf_cpu_context);
perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
+ put_cpu_var(perf_cpu_context);
rcu_read_lock();
/*
ctx = rcu_dereference(current->perf_event_ctxp);
if (ctx)
perf_event_mmap_ctx(ctx, mmap_event);
- put_cpu_var(perf_cpu_context);
rcu_read_unlock();
kfree(buf);
enum perf_type_id type,
u32 event_id, struct pt_regs *regs)
{
- if (event->cpu != -1 && event->cpu != smp_processor_id())
- return 0;
-
if (!perf_swevent_is_counting(event))
return 0;
event->pmu->read(event);
data.addr = 0;
- data.period = event->hw.last_period;
regs = get_irq_regs();
/*
* In case we exclude kernel IPs or are somehow not in interrupt
return &rsp->node[0];
}
-/*
- * Record the specified "completed" value, which is later used to validate
- * dynticks counter manipulations and CPU-offline checks. Specify
- * "rsp->completed - 1" to unconditionally invalidate any future dynticks
- * manipulations and CPU-offline checks. Such invalidation is useful at
- * the beginning of a grace period.
- */
-static void dyntick_record_completed(struct rcu_state *rsp, long comp)
-{
- rsp->dynticks_completed = comp;
-}
-
#ifdef CONFIG_SMP
-/*
- * Recall the previously recorded value of the completion for dynticks.
- */
-static long dyntick_recall_completed(struct rcu_state *rsp)
-{
- return rsp->dynticks_completed;
-}
-
/*
* If the specified CPU is offline, tell the caller that it is in
* a quiescent state. Otherwise, whack it with a reschedule IPI.
set_need_resched();
}
+/*
+ * Record the specified "completed" value, which is later used to validate
+ * dynticks counter manipulations. Specify "rsp->completed - 1" to
+ * unconditionally invalidate any future dynticks manipulations (which is
+ * useful at the beginning of a grace period).
+ */
+static void dyntick_record_completed(struct rcu_state *rsp, long comp)
+{
+ rsp->dynticks_completed = comp;
+}
+
#ifdef CONFIG_SMP
+/*
+ * Recall the previously recorded value of the completion for dynticks.
+ */
+static long dyntick_recall_completed(struct rcu_state *rsp)
+{
+ return rsp->dynticks_completed;
+}
+
/*
* Snapshot the specified CPU's dynticks counter so that we can later
* credit them with an implicit quiescent state. Return 1 if this CPU
#else /* #ifdef CONFIG_NO_HZ */
+static void dyntick_record_completed(struct rcu_state *rsp, long comp)
+{
+}
+
#ifdef CONFIG_SMP
+/*
+ * If there are no dynticks, then the only way that a CPU can passively
+ * be in a quiescent state is to be offline. Unlike dynticks idle, which
+ * is a point in time during the prior (already finished) grace period,
+ * an offline CPU is always in a quiescent state, and thus can be
+ * unconditionally applied. So just return the current value of completed.
+ */
+static long dyntick_recall_completed(struct rcu_state *rsp)
+{
+ return rsp->completed;
+}
+
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
return 0;
/*
* Update CPU-local rcu_data state to record the newly noticed grace period.
* This is used both when we started the grace period and when we notice
- * that someone else started the grace period. The caller must hold the
- * ->lock of the leaf rcu_node structure corresponding to the current CPU,
- * and must have irqs disabled.
+ * that someone else started the grace period.
*/
-static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
-{
- if (rdp->gpnum != rnp->gpnum) {
- rdp->qs_pending = 1;
- rdp->passed_quiesc = 0;
- rdp->gpnum = rnp->gpnum;
- }
-}
-
static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
{
- unsigned long flags;
- struct rcu_node *rnp;
-
- local_irq_save(flags);
- rnp = rdp->mynode;
- if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */
- !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */
- local_irq_restore(flags);
- return;
- }
- __note_new_gpnum(rsp, rnp, rdp);
- spin_unlock_irqrestore(&rnp->lock, flags);
+ rdp->qs_pending = 1;
+ rdp->passed_quiesc = 0;
+ rdp->gpnum = rsp->gpnum;
}
/*
return ret;
}
-/*
- * Advance this CPU's callbacks, but only if the current grace period
- * has ended. This may be called only from the CPU to whom the rdp
- * belongs. In addition, the corresponding leaf rcu_node structure's
- * ->lock must be held by the caller, with irqs disabled.
- */
-static void
-__rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
-{
- /* Did another grace period end? */
- if (rdp->completed != rnp->completed) {
-
- /* Advance callbacks. No harm if list empty. */
- rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
- rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
- rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
-
- /* Remember that we saw this grace-period completion. */
- rdp->completed = rnp->completed;
- }
-}
-
-/*
- * Advance this CPU's callbacks, but only if the current grace period
- * has ended. This may be called only from the CPU to whom the rdp
- * belongs.
- */
-static void
-rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
-{
- unsigned long flags;
- struct rcu_node *rnp;
-
- local_irq_save(flags);
- rnp = rdp->mynode;
- if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */
- !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */
- local_irq_restore(flags);
- return;
- }
- __rcu_process_gp_end(rsp, rnp, rdp);
- spin_unlock_irqrestore(&rnp->lock, flags);
-}
-
-/*
- * Do per-CPU grace-period initialization for running CPU. The caller
- * must hold the lock of the leaf rcu_node structure corresponding to
- * this CPU.
- */
-static void
-rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
-{
- /* Prior grace period ended, so advance callbacks for current CPU. */
- __rcu_process_gp_end(rsp, rnp, rdp);
-
- /*
- * Because this CPU just now started the new grace period, we know
- * that all of its callbacks will be covered by this upcoming grace
- * period, even the ones that were registered arbitrarily recently.
- * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
- *
- * Other CPUs cannot be sure exactly when the grace period started.
- * Therefore, their recently registered callbacks must pass through
- * an additional RCU_NEXT_READY stage, so that they will be handled
- * by the next RCU grace period.
- */
- rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
- rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
-
- /* Set state so that this CPU will detect the next quiescent state. */
- __note_new_gpnum(rsp, rnp, rdp);
-}
-
/*
* Start a new RCU grace period if warranted, re-initializing the hierarchy
* in preparation for detecting the next grace period. The caller must hold
rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
record_gp_stall_check_time(rsp);
dyntick_record_completed(rsp, rsp->completed - 1);
+ note_new_gpnum(rsp, rdp);
+
+ /*
+ * Because this CPU just now started the new grace period, we know
+ * that all of its callbacks will be covered by this upcoming grace
+ * period, even the ones that were registered arbitrarily recently.
+ * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
+ *
+ * Other CPUs cannot be sure exactly when the grace period started.
+ * Therefore, their recently registered callbacks must pass through
+ * an additional RCU_NEXT_READY stage, so that they will be handled
+ * by the next RCU grace period.
+ */
+ rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+ rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
/* Special-case the common single-level case. */
if (NUM_RCU_NODES == 1) {
rcu_preempt_check_blocked_tasks(rnp);
rnp->qsmask = rnp->qsmaskinit;
rnp->gpnum = rsp->gpnum;
- rnp->completed = rsp->completed;
rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
- rcu_start_gp_per_cpu(rsp, rnp, rdp);
spin_unlock_irqrestore(&rnp->lock, flags);
return;
}
rcu_preempt_check_blocked_tasks(rnp);
rnp->qsmask = rnp->qsmaskinit;
rnp->gpnum = rsp->gpnum;
- rnp->completed = rsp->completed;
- if (rnp == rdp->mynode)
- rcu_start_gp_per_cpu(rsp, rnp, rdp);
spin_unlock(&rnp->lock); /* irqs remain disabled. */
}
spin_unlock_irqrestore(&rsp->onofflock, flags);
}
+/*
+ * Advance this CPU's callbacks, but only if the current grace period
+ * has ended. This may be called only from the CPU to whom the rdp
+ * belongs.
+ */
+static void
+rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+ long completed_snap;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ completed_snap = ACCESS_ONCE(rsp->completed); /* outside of lock. */
+
+ /* Did another grace period end? */
+ if (rdp->completed != completed_snap) {
+
+ /* Advance callbacks. No harm if list empty. */
+ rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
+ rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
+ rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+
+ /* Remember that we saw this grace-period completion. */
+ rdp->completed = completed_snap;
+ }
+ local_irq_restore(flags);
+}
+
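The three pointer assignments above are easier to see on a concrete list. A toy userspace model (not kernel code: the enum names mirror rcutree.h, but the list is hand-built) of how the four nxttail pointers partition one singly linked callback list, and what "advancing" the callbacks does:

#include <stdio.h>

enum { RCU_DONE_TAIL, RCU_WAIT_TAIL, RCU_NEXT_READY_TAIL,
       RCU_NEXT_TAIL, RCU_NEXT_SIZE };

struct cb { struct cb *next; int id; };

int main(void)
{
	struct cb c3 = { NULL, 3 }, c2 = { &c3, 2 }, c1 = { &c2, 1 };
	struct cb *nxtlist = &c1;

	/* Each tail points at the ->next slot that terminates its
	 * segment: DONE is empty, WAIT holds cb 1, NEXT_READY holds
	 * cb 2, NEXT holds cb 3. */
	struct cb **nxttail[RCU_NEXT_SIZE] = {
		&nxtlist, &c1.next, &c2.next, &c3.next
	};

	/* A grace period completed: slide the segment boundaries, as
	 * the three assignments in rcu_process_gp_end() above do. */
	nxttail[RCU_DONE_TAIL] = nxttail[RCU_WAIT_TAIL];
	nxttail[RCU_WAIT_TAIL] = nxttail[RCU_NEXT_READY_TAIL];
	nxttail[RCU_NEXT_READY_TAIL] = nxttail[RCU_NEXT_TAIL];

	/* Everything up to nxttail[RCU_DONE_TAIL] is now invocable. */
	for (struct cb **slot = &nxtlist; slot != nxttail[RCU_DONE_TAIL];
	     slot = &(*slot)->next)
		printf("invoke callback %d\n", (*slot)->id);
	return 0;
}
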
/*
* Clean up after the prior grace period and let rcu_start_gp() start up
* the next grace period if one is needed. Note that the caller must
WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
rsp->completed = rsp->gpnum;
rsp->signaled = RCU_GP_IDLE;
+ rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
}
long lastcomp;
struct rcu_node *rnp = rcu_get_root(rsp);
u8 signaled;
- u8 forcenow;
if (!rcu_gp_in_progress(rsp))
return; /* No grace period in progress, nothing to force. */
if (rcu_process_dyntick(rsp, lastcomp,
dyntick_save_progress_counter))
goto unlock_ret;
- /* fall into next case. */
-
- case RCU_SAVE_COMPLETED:
/* Update state, record completion counter. */
- forcenow = 0;
spin_lock(&rnp->lock);
if (lastcomp == rsp->completed &&
- rsp->signaled == signaled) {
+ rsp->signaled == RCU_SAVE_DYNTICK) {
rsp->signaled = RCU_FORCE_QS;
dyntick_record_completed(rsp, lastcomp);
- forcenow = signaled == RCU_SAVE_COMPLETED;
}
spin_unlock(&rnp->lock);
- if (!forcenow)
- break;
- /* fall into next case. */
+ break;
case RCU_FORCE_QS:
rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
{
unsigned long flags;
+ long lastcomp;
unsigned long mask;
struct rcu_data *rdp = rsp->rda[cpu];
struct rcu_node *rnp = rcu_get_root(rsp);
/* Set up local state, ensuring consistent view of global state. */
spin_lock_irqsave(&rnp->lock, flags);
+ lastcomp = rsp->completed;
+ rdp->completed = lastcomp;
+ rdp->gpnum = lastcomp;
rdp->passed_quiesc = 0; /* We could be racing with new GP, */
rdp->qs_pending = 1; /* so set up to respond to current GP. */
rdp->beenonline = 1; /* We have now been online. */
rdp->preemptable = preemptable;
+ rdp->passed_quiesc_completed = lastcomp - 1;
rdp->qlen_last_fqs_check = 0;
rdp->n_force_qs_snap = rsp->n_force_qs;
rdp->blimit = blimit;
spin_lock(&rnp->lock); /* irqs already disabled. */
rnp->qsmaskinit |= mask;
mask = rnp->grpmask;
- if (rnp == rdp->mynode) {
- rdp->gpnum = rnp->completed; /* if GP in progress... */
- rdp->completed = rnp->completed;
- rdp->passed_quiesc_completed = rnp->completed - 1;
- }
spin_unlock(&rnp->lock); /* irqs already disabled. */
rnp = rnp->parent;
} while (rnp != NULL && !(rnp->qsmaskinit & mask));
long gpnum; /* Current grace period for this node. */
/* This will either be equal to or one */
/* behind the root rcu_node's gpnum. */
- long completed; /* Last grace period completed for this node. */
- /* This will either be equal to or one */
- /* behind the root rcu_node's gpnum. */
unsigned long qsmask; /* CPUs or groups that need to switch in */
/* order for current grace period to proceed.*/
/* In leaf rcu_node, each bit corresponds to */
#define RCU_GP_IDLE 0 /* No grace period in progress. */
#define RCU_GP_INIT 1 /* Grace period being initialized. */
#define RCU_SAVE_DYNTICK 2 /* Need to scan dyntick state. */
-#define RCU_SAVE_COMPLETED 3 /* Need to save rsp->completed. */
-#define RCU_FORCE_QS 4 /* Need to force quiescent state. */
+#define RCU_FORCE_QS 3 /* Need to force quiescent state. */
#ifdef CONFIG_NO_HZ
#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
#else /* #ifdef CONFIG_NO_HZ */
-#define RCU_SIGNAL_INIT RCU_SAVE_COMPLETED
+#define RCU_SIGNAL_INIT RCU_FORCE_QS
#endif /* #else #ifdef CONFIG_NO_HZ */
#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */
unsigned long jiffies_stall; /* Time at which to check */
/* for CPU stalls. */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+#ifdef CONFIG_NO_HZ
long dynticks_completed; /* Value of completed @ snap. */
- /* Protected by fqslock. */
+#endif /* #ifdef CONFIG_NO_HZ */
};
#ifdef RCU_TREE_NONCORE
#else /* #ifdef RCU_TREE_NONCORE */
/* Forward declarations for rcutree_plugin.h */
-static void rcu_bootup_announce(void);
+static inline void rcu_bootup_announce(void);
long rcu_batches_completed(void);
static void rcu_preempt_note_context_switch(int cpu);
static int rcu_preempted_readers(struct rcu_node *rnp);
/*
* Tell them what RCU they are running.
*/
-static void rcu_bootup_announce(void)
+static inline void rcu_bootup_announce(void)
{
printk(KERN_INFO
"Experimental preemptable hierarchical RCU implementation.\n");
/*
* Tell them what RCU they are running.
*/
-static void rcu_bootup_announce(void)
+static inline void rcu_bootup_announce(void)
{
printk(KERN_INFO "Hierarchical RCU implementation.\n");
}
u64 rt_avg;
u64 age_stamp;
- u64 idle_stamp;
- u64 avg_idle;
#endif
/* calc_load related fields */
* default: 0.25ms
*/
unsigned int sysctl_sched_shares_ratelimit = 250000;
-unsigned int normalized_sysctl_sched_shares_ratelimit = 250000;
/*
* Inject some fuzzyness into changing the per-cpu group shares
#endif
static void calc_load_account_active(struct rq *this_rq);
-static void update_sysctl(void);
#include "sched_stats.h"
#include "sched_idletask.c"
{
s64 delta;
- if (p->sched_class != &fair_sched_class)
- return 0;
-
/*
* Buddy candidates are cache hot:
*/
&p->se == cfs_rq_of(&p->se)->last))
return 1;
+ if (p->sched_class != &fair_sched_class)
+ return 0;
+
if (sysctl_sched_migration_cost == -1)
return 1;
if (sysctl_sched_migration_cost == 0)
#ifdef CONFIG_SMP
if (p->sched_class->task_wake_up)
p->sched_class->task_wake_up(rq, p);
-
- if (unlikely(rq->idle_stamp)) {
- u64 delta = rq->clock - rq->idle_stamp;
- u64 max = 2*sysctl_sched_migration_cost;
-
- if (delta > max)
- rq->avg_idle = max;
- else
- update_avg(&rq->avg_idle, delta);
- rq->idle_stamp = 0;
- }
#endif
out:
task_rq_unlock(rq, &flags);
unsigned long flags;
struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
- cpumask_copy(cpus, cpu_active_mask);
+ cpumask_setall(cpus);
/*
* When power savings policy is enabled for the parent domain, idle
int all_pinned = 0;
struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
- cpumask_copy(cpus, cpu_active_mask);
+ cpumask_setall(cpus);
/*
* When power savings policy is enabled for the parent domain, idle
int pulled_task = 0;
unsigned long next_balance = jiffies + HZ;
- this_rq->idle_stamp = this_rq->clock;
-
- if (this_rq->avg_idle < sysctl_sched_migration_cost)
- return;
-
for_each_domain(this_cpu, sd) {
unsigned long interval;
interval = msecs_to_jiffies(sd->balance_interval);
if (time_after(next_balance, sd->last_balance + interval))
next_balance = sd->last_balance + interval;
- if (pulled_task) {
- this_rq->idle_stamp = 0;
+ if (pulled_task)
break;
- }
}
if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
/*
cpumask_set_cpu(cpu, nohz.cpu_mask);
/* time for ilb owner also to sleep */
- if (cpumask_weight(nohz.cpu_mask) == num_active_cpus()) {
+ if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
if (atomic_read(&nohz.load_balancer) == cpu)
atomic_set(&nohz.load_balancer, -1);
return 0;
*
* This idea comes from the SD scheduler of Con Kolivas:
*/
-static void update_sysctl(void)
+static inline void sched_init_granularity(void)
{
- unsigned int cpus = min(num_online_cpus(), 8U);
- unsigned int factor = 1 + ilog2(cpus);
+ unsigned int factor = 1 + ilog2(num_online_cpus());
+ const unsigned long limit = 200000000;
-#define SET_SYSCTL(name) \
- (sysctl_##name = (factor) * normalized_sysctl_##name)
- SET_SYSCTL(sched_min_granularity);
- SET_SYSCTL(sched_latency);
- SET_SYSCTL(sched_wakeup_granularity);
- SET_SYSCTL(sched_shares_ratelimit);
-#undef SET_SYSCTL
-}
+ sysctl_sched_min_granularity *= factor;
+ if (sysctl_sched_min_granularity > limit)
+ sysctl_sched_min_granularity = limit;
-static inline void sched_init_granularity(void)
-{
- update_sysctl();
+ sysctl_sched_latency *= factor;
+ if (sysctl_sched_latency > limit)
+ sysctl_sched_latency = limit;
+
+ sysctl_sched_wakeup_granularity *= factor;
+
+ sysctl_sched_shares_ratelimit *= factor;
}
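
To make the scaling concrete, a standalone sketch under assumed conditions: an 8-CPU machine and the 5 ms default latency quoted further down (sysctl_sched_latency = 5000000ULL); ilog2_u stands in for the kernel's ilog2():

#include <stdio.h>

static unsigned int ilog2_u(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	const unsigned long limit = 200000000;	/* 200 ms cap, as above */
	unsigned int ncpus = 8;			/* assumed machine */
	unsigned int factor = 1 + ilog2_u(ncpus);
	unsigned long latency = 5000000UL * factor;	/* 5 ms default */

	if (latency > limit)
		latency = limit;
	/* prints factor=4, scaled latency=20000000 ns */
	printf("factor=%u, scaled latency=%lu ns\n", factor, latency);
	return 0;
}
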
#ifdef CONFIG_SMP
int ret = 0;
rq = task_rq_lock(p, &flags);
- if (!cpumask_intersects(new_mask, cpu_active_mask)) {
+ if (!cpumask_intersects(new_mask, cpu_online_mask)) {
ret = -EINVAL;
goto out;
}
if (cpumask_test_cpu(task_cpu(p), new_mask))
goto out;
- if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) {
+ if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) {
/* Need help from migration thread: drop lock and wait. */
struct task_struct *mt = rq->migration_thread;
again:
/* Look for allowed, online CPU in same node. */
- for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
+ for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask)
if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
goto move;
/* Any allowed, online CPU? */
- dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
+ dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
if (dest_cpu < nr_cpu_ids)
goto move;
/* No more Mr. Nice Guy. */
if (dest_cpu >= nr_cpu_ids) {
cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
- dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
+ dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
/*
* Don't tell them about moving exiting tasks or
*/
static void migrate_nr_uninterruptible(struct rq *rq_src)
{
- struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
+ struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask));
unsigned long flags;
local_irq_save(flags);
static struct ctl_table_header *sd_sysctl_header;
static void register_sched_domain_sysctl(void)
{
- int i, cpu_num = num_possible_cpus();
+ int i, cpu_num = num_online_cpus();
struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
char buf[32];
if (entry == NULL)
return;
- for_each_possible_cpu(i) {
+ for_each_online_cpu(i) {
snprintf(buf, 32, "cpu%d", i);
entry->procname = kstrdup(buf, GFP_KERNEL);
entry->mode = 0555;
static void free_rootdomain(struct root_domain *rd)
{
- synchronize_sched();
-
cpupri_cleanup(&rd->cpupri);
free_cpumask_var(rd->rto_mask);
/* Setup the mask of cpus configured for isolated domains */
static int __init isolated_cpu_setup(char *str)
{
- alloc_bootmem_cpumask_var(&cpu_isolated_map);
cpulist_parse(str, cpu_isolated_map);
return 1;
}
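
cpulist_parse() accepts the usual "isolcpus=" list syntax, e.g. "1-3,5". A rough userspace re-implementation under simplifying assumptions (no bounds or error checking, and all CPUs fit in one unsigned long):

#include <stdio.h>
#include <stdlib.h>

static unsigned long cpulist_parse_toy(const char *s)
{
	unsigned long mask = 0;

	while (*s) {
		char *end;
		long a = strtol(s, &end, 10), b = a;

		if (*end == '-')
			b = strtol(end + 1, &end, 10);
		for (long c = a; c <= b; c++)
			mask |= 1UL << c;
		s = (*end == ',') ? end + 1 : end;
	}
	return mask;
}

int main(void)
{
	/* prints 0x2e: CPUs 1, 2, 3 and 5 */
	printf("isolcpus=1-3,5 -> %#lx\n", cpulist_parse_toy("1-3,5"));
	return 0;
}
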
if (doms_new == NULL) {
ndoms_cur = 0;
doms_new = fallback_doms;
- cpumask_andnot(&doms_new[0], cpu_active_mask, cpu_isolated_map);
+ cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map);
WARN_ON_ONCE(dattr_new);
}
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- case CPU_DOWN_FAILED:
- case CPU_DOWN_FAILED_FROZEN:
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
partition_sched_domains(1, NULL, NULL);
return NOTIFY_OK;
#endif
get_online_cpus();
mutex_lock(&sched_domains_mutex);
- arch_init_sched_domains(cpu_active_mask);
+ arch_init_sched_domains(cpu_online_mask);
cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
if (cpumask_empty(non_isolated_cpus))
cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
rq->cpu = i;
rq->online = 0;
rq->migration_thread = NULL;
- rq->idle_stamp = 0;
- rq->avg_idle = 2*sysctl_sched_migration_cost;
INIT_LIST_HEAD(&rq->migration_queue);
rq_attach_root(rq, &def_root_domain);
#endif
zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
#endif
- /* May be allocated at isolcpus cmdline parse time */
- if (cpu_isolated_map == NULL)
- zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
+ zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
#endif /* SMP */
perf_event_init();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
-unsigned long long cpu_clock(int cpu)
-{
- unsigned long long clock;
- unsigned long flags;
-
- local_irq_save(flags);
- clock = sched_clock_cpu(cpu);
- local_irq_restore(flags);
-
- return clock;
-}
-
#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
void sched_clock_init(void)
return sched_clock();
}
+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
unsigned long long cpu_clock(int cpu)
{
- return sched_clock_cpu(cpu);
-}
+ unsigned long long clock;
+ unsigned long flags;
-#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
+ local_irq_save(flags);
+ clock = sched_clock_cpu(cpu);
+ local_irq_restore(flags);
+ return clock;
+}
EXPORT_SYMBOL_GPL(cpu_clock);
#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n);
-#define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n);
P(yld_count);
P(sched_switch);
P(sched_count);
P(sched_goidle);
-#ifdef CONFIG_SMP
- P64(avg_idle);
-#endif
P(ttwu_count);
P(ttwu_local);
* run vmstat and monitor the context-switches (cs) field)
*/
unsigned int sysctl_sched_latency = 5000000ULL;
-unsigned int normalized_sysctl_sched_latency = 5000000ULL;
/*
* Minimal preemption granularity for CPU-bound tasks:
* (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
*/
unsigned int sysctl_sched_min_granularity = 1000000ULL;
-unsigned int normalized_sysctl_sched_min_granularity = 1000000ULL;
/*
* is kept at sysctl_sched_latency / sysctl_sched_min_granularity
* have immediate wakeup/sleep latencies.
*/
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
-unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
rcu_read_lock();
for_each_domain(cpu, tmp) {
- if (!(tmp->flags & SD_LOAD_BALANCE))
- continue;
-
/*
* If power savings logic is enabled for a domain, see if we
* are not overloaded, if so, don't balance wider.
want_sd = 0;
}
- if (want_affine && (tmp->flags & SD_WAKE_AFFINE)) {
- int candidate = -1, i;
-
- if (cpumask_test_cpu(prev_cpu, sched_domain_span(tmp)))
- candidate = cpu;
-
- /*
- * Check for an idle shared cache.
- */
- if (tmp->flags & SD_PREFER_SIBLING) {
- if (candidate == cpu) {
- if (!cpu_rq(prev_cpu)->cfs.nr_running)
- candidate = prev_cpu;
- }
-
- if (candidate == -1 || candidate == cpu) {
- for_each_cpu(i, sched_domain_span(tmp)) {
- if (!cpumask_test_cpu(i, &p->cpus_allowed))
- continue;
- if (!cpu_rq(i)->cfs.nr_running) {
- candidate = i;
- break;
- }
- }
- }
- }
+ if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
+ cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
- if (candidate >= 0) {
- affine_sd = tmp;
- want_affine = 0;
- cpu = candidate;
- }
+ affine_sd = tmp;
+ want_affine = 0;
}
if (!want_sd && !want_affine)
return 0;
}
-
-static void rq_online_fair(struct rq *rq)
-{
- update_sysctl();
-}
-
-static void rq_offline_fair(struct rq *rq)
-{
- update_sysctl();
-}
-
#endif /* CONFIG_SMP */
/*
.load_balance = load_balance_fair,
.move_one_task = move_one_task_fair,
- .rq_online = rq_online_fair,
- .rq_offline = rq_offline_fair,
#endif
.set_curr_task = set_curr_task_fair,
for (i = 0; i < 16; i++) {
unsigned char insn;
- if (get_user(insn, (unsigned char *)(regs->ip + i)))
- break;
+ __get_user(insn, (unsigned char *)(regs->ip + i));
printk("%02x ", insn);
}
}
.strategy = &sysctl_jiffies,
},
#endif
-#ifdef CONFIG_MMU
{
.ctl_name = CTL_UNNUMBERED,
.procname = "mmap_min_addr",
.mode = 0644,
.proc_handler = &mmap_min_addr_handler,
},
-#endif
#ifdef CONFIG_NUMA
{
.ctl_name = CTL_UNNUMBERED,
.data = &show_unhandled_signals,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
+ .proc_handler = proc_dointvec
},
#endif
{ .ctl_name = 0 }
{ NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" },
{ NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" },
{ NET_IPV4_CONF_ARP_NOTIFY, "arp_notify" },
- { NET_IPV4_CONF_SRC_VMARK, "src_valid_mark" },
{}
};
#include <linux/sysdev.h>
#include <linux/tick.h>
-#include "tick-internal.h"
-
/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);
*/
void clockevents_notify(unsigned long reason, void *arg)
{
- struct clock_event_device *dev, *tmp;
+ struct list_head *node, *tmp;
unsigned long flags;
- int cpu;
spin_lock_irqsave(&clockevents_lock, flags);
clockevents_do_notify(reason, arg);
* Unregister the clock event devices which were
* released from the users in the notify chain.
*/
- list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
- list_del(&dev->list);
- /*
- * Now check whether the CPU has left unused per cpu devices
- */
- cpu = *((int *)arg);
- list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
- if (cpumask_test_cpu(cpu, dev->cpumask) &&
- cpumask_weight(dev->cpumask) == 1 &&
- !tick_is_broadcast_device(dev)) {
- BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
- list_del(&dev->list);
- }
- }
+ list_for_each_safe(node, tmp, &clockevents_released)
+ list_del(node);
break;
default:
break;
clocksource_resume_watchdog();
}
-/**
- * clocksource_max_deferment - Returns max time the clocksource can be deferred
- * @cs: Pointer to clocksource
- *
- */
-static u64 clocksource_max_deferment(struct clocksource *cs)
-{
- u64 max_nsecs, max_cycles;
-
- /*
- * Calculate the maximum number of cycles that we can pass to the
- * cyc2ns function without overflowing a 64-bit signed result. The
- * maximum number of cycles is equal to ULLONG_MAX/cs->mult which
- * is equivalent to the below.
- * max_cycles < (2^63)/cs->mult
- * max_cycles < 2^(log2((2^63)/cs->mult))
- * max_cycles < 2^(log2(2^63) - log2(cs->mult))
- * max_cycles < 2^(63 - log2(cs->mult))
- * max_cycles < 1 << (63 - log2(cs->mult))
- * Please note that we add 1 to the result of the log2 to account for
- * any rounding errors, ensure the above inequality is satisfied and
- * no overflow will occur.
- */
- max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1));
-
- /*
- * The actual maximum number of cycles we can defer the clocksource is
- * determined by the minimum of max_cycles and cs->mask.
- */
- max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
- max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift);
-
- /*
- * To ensure that the clocksource does not wrap whilst we are idle,
- * limit the time the clocksource can be deferred by 12.5%. Please
- * note a margin of 12.5% is used because this can be computed with
- * a shift, versus say 10% which would require division.
- */
- return max_nsecs - (max_nsecs >> 5);
-}
-
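For a feel of the numbers the removed clocksource_max_deferment() produced, a standalone re-computation under assumed parameters: a 64-bit counter with mult = 2^22 and shift = 22 (one tick per nanosecond); ilog2_u32 stands in for the kernel's ilog2():

#include <stdio.h>
#include <stdint.h>

static int ilog2_u32(uint32_t v)
{
	int r = -1;

	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	uint32_t mult = 1u << 22, shift = 22;	/* cyc2ns = (c * mult) >> shift */
	uint64_t mask = UINT64_MAX;		/* 64-bit counter */

	/* max_cycles < 1 << (63 - log2(mult)), plus 1 for rounding,
	 * exactly as derived in the removed comment above. */
	uint64_t max_cycles = 1ULL << (63 - (ilog2_u32(mult) + 1));

	if (max_cycles > mask)
		max_cycles = mask;

	uint64_t max_nsecs = (max_cycles * mult) >> shift;

	/* Safety margin as in the removed code: max_nsecs >> 5. For
	 * these values, roughly 2^40 ns, i.e. about 18 minutes. */
	printf("max_idle_ns = %llu (~%llu s)\n",
	       (unsigned long long)(max_nsecs - (max_nsecs >> 5)),
	       (unsigned long long)((max_nsecs - (max_nsecs >> 5))
				    / 1000000000ULL));
	return 0;
}
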
#ifdef CONFIG_GENERIC_TIME
/**
*/
int clocksource_register(struct clocksource *cs)
{
- /* calculate max idle time permitted for this clocksource */
- cs->max_idle_ns = clocksource_max_deferment(cs);
-
mutex_lock(&clocksource_mutex);
clocksource_enqueue(cs);
clocksource_select();
struct tick_sched *ts;
ktime_t last_update, expires, now;
struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
- u64 time_delta;
int cpu;
local_irq_save(flags);
seq = read_seqbegin(&xtime_lock);
last_update = last_jiffies_update;
last_jiffies = jiffies;
-
- /*
- * On SMP we really should only care for the CPU which
- * has the do_timer duty assigned. All other CPUs can
- * sleep as long as they want.
- */
- if (cpu == tick_do_timer_cpu ||
- tick_do_timer_cpu == TICK_DO_TIMER_NONE)
- time_delta = timekeeping_max_deferment();
- else
- time_delta = KTIME_MAX;
} while (read_seqretry(&xtime_lock, seq));
/* Get the next timer wheel timer */
if ((long)delta_jiffies >= 1) {
/*
- * calculate the expiry time for the next timer wheel
- * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
- * that there is no timer pending or at least extremely
- * far into the future (12 days for HZ=1000). In this
- * case we set the expiry to the end of time.
- */
- if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
- /*
- * Calculate the time delta for the next timer event.
- * If the time delta exceeds the maximum time delta
- * permitted by the current clocksource then adjust
- * the time delta accordingly to ensure the
- * clocksource does not wrap.
- */
- time_delta = min_t(u64, time_delta,
- tick_period.tv64 * delta_jiffies);
- expires = ktime_add_ns(last_update, time_delta);
- } else {
- expires.tv64 = KTIME_MAX;
- }
+ * calculate the expiry time for the next timer wheel
+ * timer
+ */
+ expires = ktime_add_ns(last_update, tick_period.tv64 *
+ delta_jiffies);
/*
* If this cpu is the one which updates jiffies, then
ts->idle_sleeps++;
- /* Mark expires */
- ts->idle_expires = expires;
-
/*
- * If the expiration time == KTIME_MAX, then
- * in this case we simply stop the tick timer.
+ * delta_jiffies >= NEXT_TIMER_MAX_DELTA signals that
+ * there is no timer pending or at least extremely far
+ * into the future (12 days for HZ=1000). In this case
+ * we simply stop the tick timer:
*/
- if (unlikely(expires.tv64 == KTIME_MAX)) {
+ if (unlikely(delta_jiffies >= NEXT_TIMER_MAX_DELTA)) {
+ ts->idle_expires.tv64 = KTIME_MAX;
if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
hrtimer_cancel(&ts->sched_timer);
goto out;
}
+ /* Mark expiries */
+ ts->idle_expires = expires;
+
if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
hrtimer_start(&ts->sched_timer, expires,
HRTIMER_MODE_ABS_PINNED);
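
The expiry arithmetic above is simple enough to check by hand: at HZ=1000 one tick is 1,000,000 ns, and the CPU may sleep until the next pending timer-wheel timer. A small sketch with assumed numbers (last_update and delta_jiffies are made up; NEXT_TIMER_MAX_DELTA matches linux/timer.h):

#include <stdio.h>
#include <stdint.h>

#define NEXT_TIMER_MAX_DELTA	((1UL << 30) - 1)

int main(void)
{
	const uint64_t tick_period = 1000000;	/* ns per tick, HZ=1000 */
	uint64_t last_update = 5000000000ULL;	/* assumed last jiffies update */
	uint64_t delta_jiffies = 42;		/* next timer, in ticks */

	if (delta_jiffies >= NEXT_TIMER_MAX_DELTA) {
		/* nothing pending for ~12 days: stop the tick outright */
		printf("expires = KTIME_MAX\n");
	} else {
		/* prints expires = 5042000000 ns */
		printf("expires = %llu ns\n",
		       (unsigned long long)(last_update +
					    tick_period * delta_jiffies));
	}
	return 0;
}
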
return ret;
}
-/**
- * timekeeping_max_deferment - Returns max time the clocksource can be deferred
- *
- * Caller must observe xtime_lock via read_seqbegin/read_seqretry to
- * ensure that the clocksource does not change!
- */
-u64 timekeeping_max_deferment(void)
-{
- return timekeeper.clock->max_idle_ns;
-}
-
/**
* read_persistent_clock - Return time from the persistent clock.
*
set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
-EXPORT_SYMBOL_GPL(getboottime);
/**
* monotonic_to_bootbased - Convert the monotonic time to boot based.
{
*ts = timespec_add_safe(*ts, total_sleep_time);
}
-EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
unsigned long get_seconds(void)
{
return count;
}
-static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
+static int dma_debug_device_change(struct notifier_block *nb,
+ unsigned long action, void *data)
{
struct device *dev = data;
int count;
- if (global_disable)
- return 0;
switch (action) {
case BUS_NOTIFY_UNBOUND_DRIVER:
{
struct notifier_block *nb;
- if (global_disable)
- return;
-
nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
if (nb == NULL) {
pr_err("dma_debug_add_bus: out of memory\n");
ref->size);
}
- if (entry->direction == DMA_BIDIRECTIONAL)
- goto out;
-
if (ref->direction != entry->direction) {
err_printk(dev, entry, "DMA-API: device driver syncs "
"DMA memory with different direction "
dir2name[ref->direction]);
}
+ if (entry->direction == DMA_BIDIRECTIONAL)
+ goto out;
+
if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
!(ref->direction == DMA_TO_DEVICE))
err_printk(dev, entry, "DMA-API: device driver syncs "
out:
put_hash_bucket(bucket, &flags);
+
}
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
*/
#include <linux/rational.h>
-#include <linux/module.h>
/*
* calculate best rational approximation for a given fraction
config DEFAULT_MMAP_MIN_ADDR
int "Low address space to protect from user allocation"
- depends on MMU
default 4096
help
This is the portion of low virtual memory which should be protected
static struct page *__read_cache_page(struct address_space *mapping,
pgoff_t index,
int (*filler)(void *,struct page*),
- void *data,
- gfp_t gfp)
+ void *data)
{
struct page *page;
int err;
repeat:
page = find_get_page(mapping, index);
if (!page) {
- page = __page_cache_alloc(gfp | __GFP_COLD);
+ page = page_cache_alloc_cold(mapping);
if (!page)
return ERR_PTR(-ENOMEM);
err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
return page;
}
-static struct page *do_read_cache_page(struct address_space *mapping,
+/**
+ * read_cache_page_async - read into page cache, fill it if needed
+ * @mapping: the page's address_space
+ * @index: the page index
+ * @filler: function to perform the read
+ * @data: destination for read data
+ *
+ * Same as read_cache_page, but don't wait for page to become unlocked
+ * after submitting it to the filler.
+ *
+ * Read into the page cache. If a page already exists, and PageUptodate() is
+ * not set, try to fill the page but don't wait for it to become unlocked.
+ *
+ * If the page does not get brought uptodate, return -EIO.
+ */
+struct page *read_cache_page_async(struct address_space *mapping,
pgoff_t index,
int (*filler)(void *,struct page*),
- void *data,
- gfp_t gfp)
-
+ void *data)
{
struct page *page;
int err;
retry:
- page = __read_cache_page(mapping, index, filler, data, gfp);
+ page = __read_cache_page(mapping, index, filler, data);
if (IS_ERR(page))
return page;
if (PageUptodate(page))
mark_page_accessed(page);
return page;
}
-
-/**
- * read_cache_page_async - read into page cache, fill it if needed
- * @mapping: the page's address_space
- * @index: the page index
- * @filler: function to perform the read
- * @data: destination for read data
- *
- * Same as read_cache_page, but don't wait for page to become unlocked
- * after submitting it to the filler.
- *
- * Read into the page cache. If a page already exists, and PageUptodate() is
- * not set, try to fill the page but don't wait for it to become unlocked.
- *
- * If the page does not get brought uptodate, return -EIO.
- */
-struct page *read_cache_page_async(struct address_space *mapping,
- pgoff_t index,
- int (*filler)(void *,struct page*),
- void *data)
-{
- return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
-}
EXPORT_SYMBOL(read_cache_page_async);
-static struct page *wait_on_page_read(struct page *page)
-{
- if (!IS_ERR(page)) {
- wait_on_page_locked(page);
- if (!PageUptodate(page)) {
- page_cache_release(page);
- page = ERR_PTR(-EIO);
- }
- }
- return page;
-}
-
-/**
- * read_cache_page_gfp - read into page cache, using specified page allocation flags.
- * @mapping: the page's address_space
- * @index: the page index
- * @gfp: the page allocator flags to use if allocating
- *
- * This is the same as "read_mapping_page(mapping, index, NULL)", but with
- * any new page allocations done using the specified allocation flags. Note
- * that the Radix tree operations will still use GFP_KERNEL, so you can't
- * expect to do this atomically or anything like that - but you can pass in
- * other page requirements.
- *
- * If the page does not get brought uptodate, return -EIO.
- */
-struct page *read_cache_page_gfp(struct address_space *mapping,
- pgoff_t index,
- gfp_t gfp)
-{
- filler_t *filler = (filler_t *)mapping->a_ops->readpage;
-
- return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
-}
-EXPORT_SYMBOL(read_cache_page_gfp);
-
/**
* read_cache_page - read into page cache, fill it if needed
* @mapping: the page's address_space
int (*filler)(void *,struct page*),
void *data)
{
- return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
+ struct page *page;
+
+ page = read_cache_page_async(mapping, index, filler, data);
+ if (IS_ERR(page))
+ goto out;
+ wait_on_page_locked(page);
+ if (!PageUptodate(page)) {
+ page_cache_release(page);
+ page = ERR_PTR(-EIO);
+ }
+ out:
+ return page;
}
EXPORT_SYMBOL(read_cache_page);
if (unlikely(status))
break;
- if (mapping_writably_mapped(mapping))
- flush_dcache_page(page);
-
pagefault_disable();
copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
pagefault_enable();
}
/*
- * must be called with vma's mmap_sem held for read or write, and page locked.
+ * must be called with vma's mmap_sem held for read, and page locked.
*/
extern void mlock_vma_page(struct page *page);
-extern void munlock_vma_page(struct page *page);
/*
* Clear the page's PageMlocked(). This can be useful in a situation where
#include <linux/ksm.h>
#include <asm/tlbflush.h>
-#include "internal.h"
/*
* A few notes about the KSM scanning process,
* ptes are necessarily already write-protected. But in either
* case, we need to lock and check page_count is not raised.
*/
- if (write_protect_page(vma, oldpage, &orig_pte) == 0 &&
- pages_identical(oldpage, newpage))
- err = replace_page(vma, oldpage, newpage, orig_pte);
+ if (write_protect_page(vma, oldpage, &orig_pte)) {
+ unlock_page(oldpage);
+ goto out_putpage;
+ }
+ unlock_page(oldpage);
- if ((vma->vm_flags & VM_LOCKED) && !err)
- munlock_vma_page(oldpage);
+ if (pages_identical(oldpage, newpage))
+ err = replace_page(vma, oldpage, newpage, orig_pte);
- unlock_page(oldpage);
out_putpage:
put_page(oldpage);
put_page(newpage);
task_unlock(task);
if (!curr)
return 0;
- /*
- * We should check use_hierarchy of "mem" not "curr". Because checking
- * use_hierarchy of "curr" here make this function true if hierarchy is
- * enabled in "curr" and "curr" is a child of "mem" in *cgroup*
- * hierarchy(even if use_hierarchy is disabled in "mem").
- */
- if (mem->use_hierarchy)
+ if (curr->use_hierarchy)
ret = css_is_ancestor(&curr->css, &mem->css);
else
ret = (curr == mem);
if (free_all)
goto try_to_free;
move_account:
- do {
+ while (mem->res.usage > 0) {
ret = -EBUSY;
if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
goto out;
if (ret == -ENOMEM)
goto try_to_free;
cond_resched();
- /* "ret" should also be checked to ensure all lists are empty. */
- } while (mem->res.usage > 0 || ret);
+ }
+ ret = 0;
out:
css_put(&mem->css);
return ret;
}
lru_add_drain();
/* try move_account...there may be some *locked* pages. */
- goto move_account;
+ if (mem->res.usage)
+ goto move_account;
+ ret = 0;
+ goto out;
}
int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
val += idx_val;
mem_cgroup_get_recursive_idx_stat(mem,
MEM_CGROUP_STAT_SWAPOUT, &idx_val);
- val += idx_val;
val <<= PAGE_SHIFT;
} else
val = res_counter_read_u64(&mem->memsw, name);
ret = VM_FAULT_HWPOISON;
} else {
print_bad_pte(vma, address, orig_pte, NULL);
- ret = VM_FAULT_SIGBUS;
+ ret = VM_FAULT_OOM;
}
goto out;
}
* Page table corrupted: show pte and kill process.
*/
print_bad_pte(vma, address, orig_pte, NULL);
- return VM_FAULT_SIGBUS;
+ return VM_FAULT_OOM;
}
pgoff = pte_to_pgoff(orig_pte);
goto out_pm;
err = -ENODEV;
- if (node < 0 || node >= MAX_NUMNODES)
- goto out_pm;
-
if (!node_state(node, N_HIGH_MEMORY))
goto out_pm;
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
-#include <linux/hugetlb.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
if (!vma || addr < vma->vm_start)
return -ENOMEM;
-#ifdef CONFIG_HUGETLB_PAGE
- if (is_vm_hugetlb_page(vma)) {
- struct hstate *h;
- unsigned long nr_huge;
- unsigned char present;
-
- i = 0;
- nr = min(pages, (vma->vm_end - addr) >> PAGE_SHIFT);
- h = hstate_vma(vma);
- nr_huge = ((addr + pages * PAGE_SIZE - 1) >> huge_page_shift(h))
- - (addr >> huge_page_shift(h)) + 1;
- nr_huge = min(nr_huge,
- (vma->vm_end - addr) >> huge_page_shift(h));
- while (1) {
- /* hugepage always in RAM for now,
- * but generally it needs to be checked */
- ptep = huge_pte_offset(current->mm,
- addr & huge_page_mask(h));
- present = !!(ptep &&
- !huge_pte_none(huge_ptep_get(ptep)));
- while (1) {
- vec[i++] = present;
- addr += PAGE_SIZE;
- /* reach buffer limit */
- if (i == nr)
- return nr;
- /* check hugepage border */
- if (!((addr & ~huge_page_mask(h))
- >> PAGE_SHIFT))
- break;
- }
- }
- return nr;
- }
-#endif
-
/*
* Calculate how many pages there are left in the last level of the
* PTE array for our address.
* not get another chance to clear PageMlocked. If we successfully
* isolate the page and try_to_munlock() detects other VM_LOCKED vmas
* mapping the page, it will restore the PageMlocked state, unless the page
- * is mapped in a non-linear vma. So, we go ahead and ClearPageMlocked(),
+ * is mapped in a non-linear vma. So, we go ahead and SetPageMlocked(),
* perhaps redundantly.
* If we lose the isolation race, and the page is mapped by other VM_LOCKED
* vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap()
* either of which will restore the PageMlocked state by calling
* mlock_vma_page() above, if it can grab the vma's mmap sem.
*/
-void munlock_vma_page(struct page *page)
+static void munlock_vma_page(struct page *page)
{
BUG_ON(!PageLocked(page));
if (!(flags & MAP_FIXED))
addr = round_hint_to_min(addr);
+ error = arch_mmap_check(addr, len, flags);
+ if (error)
+ return error;
+
/* Careful about overflows.. */
len = PAGE_ALIGN(len);
- if (!len)
+ if (!len || len > TASK_SIZE)
return -ENOMEM;
/* offset overflow? */
if (mm->map_count > sysctl_max_map_count)
return -ENOMEM;
+ if (flags & MAP_HUGETLB) {
+ struct user_struct *user = NULL;
+ if (file)
+ return -EINVAL;
+
+ /*
+ * VM_NORESERVE is used because the reservations will be
+ * taken when vm_ops->mmap() is called
+ * A dummy user value is used because we are not locking
+ * memory so no accounting is necessary
+ */
+ len = ALIGN(len, huge_page_size(&default_hstate));
+ file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
+ &user, HUGETLB_ANONHUGE_INODE);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+ }
+
/* Obtain the address to map to. we verify (or select) it and ensure
* that it represents a valid section of the address space.
*/
unsigned long (*get_area)(struct file *, unsigned long,
unsigned long, unsigned long, unsigned long);
- unsigned long error = arch_mmap_check(addr, len, flags);
- if (error)
- return error;
-
- /* Careful about overflows.. */
- if (len > TASK_SIZE)
- return -ENOMEM;
-
get_area = current->mm->get_unmapped_area;
if (file && file->f_op && file->f_op->get_unmapped_area)
get_area = file->f_op->get_unmapped_area;
if (!len)
return addr;
+ if ((addr + len) > TASK_SIZE || (addr + len) < addr)
+ return -EINVAL;
+
+ if (is_hugepage_only_range(mm, addr, len))
+ return -EINVAL;
+
error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
if (error)
return error;
flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
- error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
- if (error & ~PAGE_MASK)
+ error = arch_mmap_check(addr, len, flags);
+ if (error)
return error;
/*
return new_addr;
}
-static struct vm_area_struct *vma_to_resize(unsigned long addr,
- unsigned long old_len, unsigned long new_len, unsigned long *p)
-{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma = find_vma(mm, addr);
-
- if (!vma || vma->vm_start > addr)
- goto Efault;
-
- if (is_vm_hugetlb_page(vma))
- goto Einval;
-
- /* We can't remap across vm area boundaries */
- if (old_len > vma->vm_end - addr)
- goto Efault;
-
- if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
- if (new_len > old_len)
- goto Efault;
- }
-
- if (vma->vm_flags & VM_LOCKED) {
- unsigned long locked, lock_limit;
- locked = mm->locked_vm << PAGE_SHIFT;
- lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
- locked += new_len - old_len;
- if (locked > lock_limit && !capable(CAP_IPC_LOCK))
- goto Eagain;
- }
-
- if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
- goto Enomem;
-
- if (vma->vm_flags & VM_ACCOUNT) {
- unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
- if (security_vm_enough_memory(charged))
- goto Efault;
- *p = charged;
- }
-
- return vma;
-
-Efault: /* very odd choice for most of the cases, but... */
- return ERR_PTR(-EFAULT);
-Einval:
- return ERR_PTR(-EINVAL);
-Enomem:
- return ERR_PTR(-ENOMEM);
-Eagain:
- return ERR_PTR(-EAGAIN);
-}
-
-static unsigned long mremap_to(unsigned long addr,
- unsigned long old_len, unsigned long new_addr,
- unsigned long new_len)
-{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long ret = -EINVAL;
- unsigned long charged = 0;
- unsigned long map_flags;
-
- if (new_addr & ~PAGE_MASK)
- goto out;
-
- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
- goto out;
-
- /* Check if the location we're moving into overlaps the
- * old location at all, and fail if it does.
- */
- if ((new_addr <= addr) && (new_addr+new_len) > addr)
- goto out;
-
- if ((addr <= new_addr) && (addr+old_len) > new_addr)
- goto out;
-
- ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
- if (ret)
- goto out;
-
- ret = do_munmap(mm, new_addr, new_len);
- if (ret)
- goto out;
-
- if (old_len >= new_len) {
- ret = do_munmap(mm, addr+new_len, old_len - new_len);
- if (ret && old_len != new_len)
- goto out;
- old_len = new_len;
- }
-
- vma = vma_to_resize(addr, old_len, new_len, &charged);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto out;
- }
-
- map_flags = MAP_FIXED;
- if (vma->vm_flags & VM_MAYSHARE)
- map_flags |= MAP_SHARED;
-
- ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
- ((addr - vma->vm_start) >> PAGE_SHIFT),
- map_flags);
- if (ret & ~PAGE_MASK)
- goto out1;
-
- ret = move_vma(vma, addr, old_len, new_len, new_addr);
- if (!(ret & ~PAGE_MASK))
- goto out;
-out1:
- vm_unacct_memory(charged);
-
-out:
- return ret;
-}
-
-static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
-{
- unsigned long end = vma->vm_end + delta;
- if (end < vma->vm_end) /* overflow */
- return 0;
- if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
- return 0;
- if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
- 0, MAP_FIXED) & ~PAGE_MASK)
- return 0;
- return 1;
-}
-
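The removed vma_to_resize() leans on the kernel's ERR_PTR convention: an errno value travels inside the returned pointer itself, so one return slot carries both outcomes. A self-contained userspace sketch of that convention (the helpers mimic linux/err.h; lookup() and the error value are invented for illustration):

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* the top MAX_ERRNO addresses are reserved as error codes */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *lookup(int ok)
{
	static int obj;

	return ok ? (void *)&obj : ERR_PTR(-22 /* EINVAL */);
}

int main(void)
{
	void *p = lookup(0);

	if (IS_ERR(p))
		printf("lookup failed: %ld\n", PTR_ERR(p));
	return 0;
}
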
/*
* Expand (or shrink) an existing mapping, potentially moving it at the
* same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
if (!new_len)
goto out;
+ /* new_addr is only valid if MREMAP_FIXED is specified */
if (flags & MREMAP_FIXED) {
- if (flags & MREMAP_MAYMOVE)
- ret = mremap_to(addr, old_len, new_addr, new_len);
- goto out;
+ if (new_addr & ~PAGE_MASK)
+ goto out;
+ if (!(flags & MREMAP_MAYMOVE))
+ goto out;
+
+ if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
+ goto out;
+
+ /* Check if the location we're moving into overlaps the
+ * old location at all, and fail if it does.
+ */
+ if ((new_addr <= addr) && (new_addr+new_len) > addr)
+ goto out;
+
+ if ((addr <= new_addr) && (addr+old_len) > new_addr)
+ goto out;
+
+ ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
+ if (ret)
+ goto out;
+
+ ret = do_munmap(mm, new_addr, new_len);
+ if (ret)
+ goto out;
}
/*
if (ret && old_len != new_len)
goto out;
ret = addr;
- goto out;
+ if (!(flags & MREMAP_FIXED) || (new_addr == addr))
+ goto out;
+ old_len = new_len;
}
/*
- * Ok, we need to grow..
+ * Ok, we need to grow.. or relocate.
*/
- vma = vma_to_resize(addr, old_len, new_len, &charged);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
+ ret = -EFAULT;
+ vma = find_vma(mm, addr);
+ if (!vma || vma->vm_start > addr)
+ goto out;
+ if (is_vm_hugetlb_page(vma)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ /* We can't remap across vm area boundaries */
+ if (old_len > vma->vm_end - addr)
+ goto out;
+ if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
+ if (new_len > old_len)
+ goto out;
+ }
+ if (vma->vm_flags & VM_LOCKED) {
+ unsigned long locked, lock_limit;
+ locked = mm->locked_vm << PAGE_SHIFT;
+ lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+ locked += new_len - old_len;
+ ret = -EAGAIN;
+ if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+ goto out;
+ }
+ if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT)) {
+ ret = -ENOMEM;
goto out;
}
+ if (vma->vm_flags & VM_ACCOUNT) {
+ charged = (new_len - old_len) >> PAGE_SHIFT;
+ if (security_vm_enough_memory(charged))
+ goto out_nc;
+ }
+
/* old_len exactly to the end of the area..
+ * And we're not relocating the area.
*/
- if (old_len == vma->vm_end - addr) {
+ if (old_len == vma->vm_end - addr &&
+ !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
+ (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
+ unsigned long max_addr = TASK_SIZE;
+ if (vma->vm_next)
+ max_addr = vma->vm_next->vm_start;
/* can we just expand the current mapping? */
- if (vma_expandable(vma, new_len - old_len)) {
+ if (max_addr - addr >= new_len) {
int pages = (new_len - old_len) >> PAGE_SHIFT;
vma_adjust(vma, vma->vm_start,
*/
ret = -ENOMEM;
if (flags & MREMAP_MAYMOVE) {
- unsigned long map_flags = 0;
- if (vma->vm_flags & VM_MAYSHARE)
- map_flags |= MAP_SHARED;
-
- new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
- vma->vm_pgoff +
- ((addr - vma->vm_start) >> PAGE_SHIFT),
- map_flags);
- if (new_addr & ~PAGE_MASK) {
- ret = new_addr;
- goto out;
- }
+ if (!(flags & MREMAP_FIXED)) {
+ unsigned long map_flags = 0;
+ if (vma->vm_flags & VM_MAYSHARE)
+ map_flags |= MAP_SHARED;
+
+ new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
+ vma->vm_pgoff, map_flags);
+ if (new_addr & ~PAGE_MASK) {
+ ret = new_addr;
+ goto out;
+ }
- ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
- if (ret)
- goto out;
+ ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
+ if (ret)
+ goto out;
+ }
ret = move_vma(vma, addr, old_len, new_len, new_addr);
}
out:
if (ret & ~PAGE_MASK)
vm_unacct_memory(charged);
+out_nc:
return ret;
}
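
The VM_LOCKED branch above projects the post-grow locked byte count and rejects the request with -EAGAIN when it would exceed RLIMIT_MEMLOCK for an unprivileged caller (CAP_IPC_LOCK bypasses the limit). A toy userspace sketch of the same check, with hypothetical names:

#include <stdio.h>

/* Growth is allowed only if the projected locked byte count stays
 * within the limit, or the caller is privileged. */
static int mlock_growth_ok(unsigned long locked, unsigned long grow,
			   unsigned long limit, int privileged)
{
	return locked + grow <= limit || privileged;
}

int main(void)
{
	printf("%d\n", mlock_growth_ok(60 << 10, 8 << 10, 64 << 10, 0)); /* 0 */
	printf("%d\n", mlock_growth_ok(60 << 10, 8 << 10, 64 << 10, 1)); /* 1 */
	return 0;
}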
cpuset_print_task_mems_allowed(current);
task_unlock(current);
dump_stack();
- mem_cgroup_print_oom_info(mem, p);
+ mem_cgroup_print_oom_info(mem, current);
show_mem();
if (sysctl_oom_dump_tasks)
dump_tasks(mem);
page = list_entry(list->prev, struct page, lru);
/* must delete as __free_one_page list manipulates */
list_del(&page->lru);
- /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
- __free_one_page(page, zone, 0, page_private(page));
- trace_mm_page_pcpu_drain(page, 0, page_private(page));
+ __free_one_page(page, zone, 0, migratetype);
+ trace_mm_page_pcpu_drain(page, 0, migratetype);
} while (--count && --batch_free && !list_empty(list));
}
spin_unlock(&zone->lock);
}
spin_lock_irqsave(&zone->lock, flags);
page = __rmqueue(zone, order, migratetype);
+ __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
spin_unlock(&zone->lock);
if (!page)
goto failed;
- __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
}
__count_zone_vm_events(PGALLOC, zone, 1 << order);
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
-#include <linux/hugetlb.h>
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
struct mm_walk *walk)
pgd_t *pgd;
unsigned long next;
int err = 0;
- struct vm_area_struct *vma;
if (addr >= end)
return err;
pgd = pgd_offset(walk->mm, addr);
do {
next = pgd_addr_end(addr, end);
-
- /* skip hugetlb vma to avoid hugepage PMD being cleared
- * in pmd_none_or_clear_bad(). */
- vma = find_vma(walk->mm, addr);
- if (vma && is_vm_hugetlb_page(vma)) {
- if (vma->vm_end < next)
- next = vma->vm_end;
- continue;
- }
-
if (pgd_none_or_clear_bad(pgd)) {
if (walk->pte_hole)
err = walk->pte_hole(addr, next, walk);
if (err)
break;
- pgd++;
continue;
}
if (walk->pgd_entry)
err = walk_pud_range(pgd, addr, next, walk);
if (err)
break;
- pgd++;
- } while (addr = next, addr != end);
+ } while (pgd++, addr = next, addr != end);
return err;
}
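
Hoisting pgd++ into the loop condition via the comma operator removes the duplicated increments before each continue and at the loop end, and guarantees that any future early continue still advances the iterator. A tiny illustration of the idiom (not kernel code):

#include <stdio.h>

/* With the increment moved into the while condition, a 'continue'
 * in the body can no longer skip it. */
int main(void)
{
	int steps[] = { 1, 0, 2, 0, 3 };
	int i = 0, sum = 0;

	do {
		if (steps[i] == 0)
			continue;	/* i++ below still runs */
		sum += steps[i];
	} while (i++, i < 5);

	printf("sum=%d\n", sum);	/* 6 */
	return 0;
}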
*/
void truncate_pagecache(struct inode *inode, loff_t old, loff_t new)
{
- struct address_space *mapping = inode->i_mapping;
-
- /*
- * unmap_mapping_range is called twice, first simply for
- * efficiency so that truncate_inode_pages does fewer
- * single-page unmaps. However after this first call, and
- * before truncate_inode_pages finishes, it is possible for
- * private pages to be COWed, which remain after
- * truncate_inode_pages finishes, hence the second
- * unmap_mapping_range call must be made for correctness.
- */
- unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
- truncate_inode_pages(mapping, new);
- unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
+ if (new < old) {
+ struct address_space *mapping = inode->i_mapping;
+
+ /*
+ * unmap_mapping_range is called twice, first simply for
+ * efficiency so that truncate_inode_pages does fewer
+ * single-page unmaps. However after this first call, and
+ * before truncate_inode_pages finishes, it is possible for
+ * private pages to be COWed, which remain after
+ * truncate_inode_pages finishes, hence the second
+ * unmap_mapping_range call must be made for correctness.
+ */
+ unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
+ truncate_inode_pages(mapping, new);
+ unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
+ }
}
EXPORT_SYMBOL(truncate_pagecache);
#include <linux/module.h>
#include <linux/err.h>
#include <linux/sched.h>
-#include <linux/hugetlb.h>
-#include <linux/syscalls.h>
-#include <linux/mman.h>
-#include <linux/file.h>
#include <asm/uaccess.h>
#define CREATE_TRACE_POINTS
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
-SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
- unsigned long, prot, unsigned long, flags,
- unsigned long, fd, unsigned long, pgoff)
-{
- struct file * file = NULL;
- unsigned long retval = -EBADF;
-
- if (!(flags & MAP_ANONYMOUS)) {
- if (unlikely(flags & MAP_HUGETLB))
- return -EINVAL;
- file = fget(fd);
- if (!file)
- goto out;
- } else if (flags & MAP_HUGETLB) {
- struct user_struct *user = NULL;
- /*
- * VM_NORESERVE is used because the reservations will be
- * taken when vm_ops->mmap() is called
- * A dummy user value is used because we are not locking
- * memory so no accounting is necessary
- */
- len = ALIGN(len, huge_page_size(&default_hstate));
- file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
- &user, HUGETLB_ANONHUGE_INODE);
- if (IS_ERR(file))
- return PTR_ERR(file);
- }
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
- down_write(&current->mm->mmap_sem);
- retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
-out:
- return retval;
-}
-
/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
-/* for per-CPU blocks */
-static void purge_fragmented_blocks_allcpus(void);
-
/*
* Purges all lazily-freed vmap areas.
*
} else
spin_lock(&purge_lock);
- if (sync)
- purge_fragmented_blocks_allcpus();
-
rcu_read_lock();
list_for_each_entry_rcu(va, &vmap_area_list, list) {
if (va->flags & VM_LAZY_FREE) {
struct vmap_block_queue {
spinlock_t lock;
struct list_head free;
+ struct list_head dirty;
+ unsigned int nr_dirty;
};
struct vmap_block {
unsigned long free, dirty;
DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
- struct list_head free_list;
- struct rcu_head rcu_head;
- struct list_head purge;
+ union {
+ struct list_head free_list;
+ struct rcu_head rcu_head;
+ };
};
/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
vbq = &get_cpu_var(vmap_block_queue);
vb->vbq = vbq;
spin_lock(&vbq->lock);
- list_add_rcu(&vb->free_list, &vbq->free);
+ list_add(&vb->free_list, &vbq->free);
spin_unlock(&vbq->lock);
put_cpu_var(vmap_cpu_blocks);
struct vmap_block *tmp;
unsigned long vb_idx;
+ BUG_ON(!list_empty(&vb->free_list));
+
vb_idx = addr_to_vb_idx(vb->va->va_start);
spin_lock(&vmap_block_tree_lock);
tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
call_rcu(&vb->rcu_head, rcu_free_vb);
}
-static void purge_fragmented_blocks(int cpu)
-{
- LIST_HEAD(purge);
- struct vmap_block *vb;
- struct vmap_block *n_vb;
- struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
-
- rcu_read_lock();
- list_for_each_entry_rcu(vb, &vbq->free, free_list) {
-
- if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
- continue;
-
- spin_lock(&vb->lock);
- if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
- vb->free = 0; /* prevent further allocs after releasing lock */
- vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
- bitmap_fill(vb->alloc_map, VMAP_BBMAP_BITS);
- bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
- spin_lock(&vbq->lock);
- list_del_rcu(&vb->free_list);
- spin_unlock(&vbq->lock);
- spin_unlock(&vb->lock);
- list_add_tail(&vb->purge, &purge);
- } else
- spin_unlock(&vb->lock);
- }
- rcu_read_unlock();
-
- list_for_each_entry_safe(vb, n_vb, &purge, purge) {
- list_del(&vb->purge);
- free_vmap_block(vb);
- }
-}
-
-static void purge_fragmented_blocks_thiscpu(void)
-{
- purge_fragmented_blocks(smp_processor_id());
-}
-
-static void purge_fragmented_blocks_allcpus(void)
-{
- int cpu;
-
- for_each_possible_cpu(cpu)
- purge_fragmented_blocks(cpu);
-}
-
static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
struct vmap_block_queue *vbq;
struct vmap_block *vb;
unsigned long addr = 0;
unsigned int order;
- int purge = 0;
BUG_ON(size & ~PAGE_MASK);
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
int i;
spin_lock(&vb->lock);
- if (vb->free < 1UL << order)
- goto next;
i = bitmap_find_free_region(vb->alloc_map,
VMAP_BBMAP_BITS, order);
- if (i < 0) {
- if (vb->free + vb->dirty == VMAP_BBMAP_BITS) {
- /* fragmented and no outstanding allocations */
- BUG_ON(vb->dirty != VMAP_BBMAP_BITS);
- purge = 1;
+ if (i >= 0) {
+ addr = vb->va->va_start + (i << PAGE_SHIFT);
+ BUG_ON(addr_to_vb_idx(addr) !=
+ addr_to_vb_idx(vb->va->va_start));
+ vb->free -= 1UL << order;
+ if (vb->free == 0) {
+ spin_lock(&vbq->lock);
+ list_del_init(&vb->free_list);
+ spin_unlock(&vbq->lock);
}
- goto next;
- }
- addr = vb->va->va_start + (i << PAGE_SHIFT);
- BUG_ON(addr_to_vb_idx(addr) !=
- addr_to_vb_idx(vb->va->va_start));
- vb->free -= 1UL << order;
- if (vb->free == 0) {
- spin_lock(&vbq->lock);
- list_del_rcu(&vb->free_list);
- spin_unlock(&vbq->lock);
+ spin_unlock(&vb->lock);
+ break;
}
spin_unlock(&vb->lock);
- break;
-next:
- spin_unlock(&vb->lock);
}
-
- if (purge)
- purge_fragmented_blocks_thiscpu();
-
put_cpu_var(vmap_cpu_blocks);
rcu_read_unlock();
BUG_ON(!vb);
spin_lock(&vb->lock);
- BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));
+ bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
vb->dirty += 1UL << order;
if (vb->dirty == VMAP_BBMAP_BITS) {
- BUG_ON(vb->free);
+ BUG_ON(vb->free || !list_empty(&vb->free_list));
spin_unlock(&vb->lock);
free_vmap_block(vb);
} else
vbq = &per_cpu(vmap_block_queue, i);
spin_lock_init(&vbq->lock);
INIT_LIST_HEAD(&vbq->free);
+ INIT_LIST_HEAD(&vbq->dirty);
+ vbq->nr_dirty = 0;
}
/* Import existing vmlist entries. */
}
EXPORT_SYMBOL_GPL(free_vm_area);
-#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
static struct vmap_area *node_to_va(struct rb_node *n)
{
return n ? rb_entry(n, struct vmap_area, rb_node) : NULL;
kfree(vms);
return NULL;
}
-#endif
/**
* pcpu_free_vm_areas - free vmalloc areas for percpu allocator
return low;
}
-static int inactive_list_is_low(struct zone *zone, struct scan_control *sc,
- int file)
-{
- if (file)
- return inactive_file_is_low(zone, sc);
- else
- return inactive_anon_is_low(zone, sc);
-}
-
static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
struct zone *zone, struct scan_control *sc, int priority)
{
int file = is_file_lru(lru);
- if (is_active_lru(lru)) {
- if (inactive_list_is_low(zone, sc, file))
- shrink_active_list(nr_to_scan, zone, sc, priority, file);
+ if (lru == LRU_ACTIVE_FILE && inactive_file_is_low(zone, sc)) {
+ shrink_active_list(nr_to_scan, zone, sc, priority, file);
return 0;
}
+ if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) {
+ shrink_active_list(nr_to_scan, zone, sc, priority, file);
+ return 0;
+ }
return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
}
#endif
}
- /*
- * There is one ref for the state machine; a caller needs
- * one more to put it back, just like with the existing one.
- */
- ax25_cb_hold(ax25);
-
ax25_cb_add(ax25);
ax25->state = AX25_STATE_1;
{
int ret;
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
switch(cmd) {
case EBT_SO_SET_ENTRIES:
ret = do_replace(sock_net(sk), user, len);
struct ebt_replace tmp;
struct ebt_table *t;
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
if (copy_from_user(&tmp, user, sizeof(tmp)))
return -EFAULT;
rollback_registered(dev);
dev->reg_state = NETREG_UNREGISTERED;
}
- /*
- * Prevent userspace races by waiting until the network
- * device is fully setup before sending notifications.
- */
- rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
out:
return ret;
/* Notify protocols, that a new device appeared. */
call_netdevice_notifiers(NETDEV_REGISTER, dev);
- /*
- * Prevent userspace races by waiting until the network
- * device is fully setup before sending notifications.
- */
- rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
-
synchronize_net();
err = 0;
out:
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
-#include <linux/sched.h>
#include <net/dst.h>
while ((dst = next) != NULL) {
next = dst->next;
prefetch(&next->next);
- cond_resched();
if (likely(atomic_read(&dst->__refcnt))) {
last->next = dst;
last = dst;
wait_event_interruptible_timeout(t->queue,
t->control != 0,
HZ/10);
- try_to_freeze();
continue;
}
case NETDEV_UNREGISTER:
rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
break;
+ case NETDEV_REGISTER:
+ rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
+ break;
case NETDEV_UP:
case NETDEV_DOWN:
rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
break;
- case NETDEV_REGISTER:
case NETDEV_CHANGE:
case NETDEV_GOING_DOWN:
break;
if (newsk->sk_prot->sockets_allocated)
percpu_counter_inc(newsk->sk_prot->sockets_allocated);
-
- if (sock_flag(newsk, SOCK_TIMESTAMP) ||
- sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
- net_enable_timestamp();
}
out:
return newsk;
DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"),
DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
"accept_source_route"),
- DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
if (in_dev) {
no_addr = in_dev->ifa_list == NULL;
rpf = IN_DEV_RPFILTER(in_dev);
- if (mark && !IN_DEV_SRC_VMARK(in_dev))
- fl.mark = 0;
}
rcu_read_unlock();
if (skb->sk) {
frag->sk = skb->sk;
frag->destructor = sock_wfree;
+ truesizes += frag->truesize;
}
- truesizes += frag->truesize;
}
/* Everything is OK. Generate! */
if (t && !IS_ERR(t)) {
struct arpt_getinfo info;
const struct xt_table_info *private = t->private;
-#ifdef CONFIG_COMPAT
- struct xt_table_info tmp;
+#ifdef CONFIG_COMPAT
if (compat) {
+ struct xt_table_info tmp;
ret = compat_table_info(private, &tmp);
xt_compat_flush_offsets(NFPROTO_ARP);
private = &tmp;
if (t && !IS_ERR(t)) {
struct ipt_getinfo info;
const struct xt_table_info *private = t->private;
-#ifdef CONFIG_COMPAT
- struct xt_table_info tmp;
+#ifdef CONFIG_COMPAT
if (compat) {
+ struct xt_table_info tmp;
ret = compat_table_info(private, &tmp);
xt_compat_flush_offsets(AF_INET);
private = &tmp;
{
.ctl_name = NET_IPV4_NF_CONNTRACK_BUCKETS,
.procname = "ip_conntrack_buckets",
- .data = &init_net.ct.htable_size,
+ .data = &nf_conntrack_htable_size,
.maxlen = sizeof(unsigned int),
.mode = 0444,
.proc_handler = proc_dointvec,
struct hlist_nulls_node *n;
for (st->bucket = 0;
- st->bucket < net->ct.htable_size;
+ st->bucket < nf_conntrack_htable_size;
st->bucket++) {
n = rcu_dereference(net->ct.hash[st->bucket].first);
if (!is_a_nulls(n))
head = rcu_dereference(head->next);
while (is_a_nulls(head)) {
if (likely(get_nulls_value(head) == st->bucket)) {
- if (++st->bucket >= net->ct.htable_size)
+ if (++st->bucket >= nf_conntrack_htable_size)
return NULL;
}
head = rcu_dereference(net->ct.hash[st->bucket].first);
#include <net/route.h>
#include <net/ip.h>
-#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
return err;
}
-static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
- struct sk_buff *skb)
-{
-#ifdef CONFIG_BRIDGE_NETFILTER
- if (skb->nf_bridge &&
- skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
- return IP_DEFRAG_CONNTRACK_BRIDGE_IN;
-#endif
- if (hooknum == NF_INET_PRE_ROUTING)
- return IP_DEFRAG_CONNTRACK_IN;
- else
- return IP_DEFRAG_CONNTRACK_OUT;
-}
-
static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
struct sk_buff *skb,
const struct net_device *in,
#endif
/* Gather fragments. */
if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
- enum ip_defrag_users user = nf_ct_defrag_user(hooknum, skb);
- if (nf_ct_ipv4_gather_frags(skb, user))
+ if (nf_ct_ipv4_gather_frags(skb,
+ hooknum == NF_INET_PRE_ROUTING ?
+ IP_DEFRAG_CONNTRACK_IN :
+ IP_DEFRAG_CONNTRACK_OUT))
return NF_STOLEN;
}
return NF_ACCEPT;
static struct nf_conntrack_l3proto *l3proto __read_mostly;
+/* Calculated at init based on memory size */
+static unsigned int nf_nat_htable_size __read_mostly;
+
#define MAX_IP_NAT_PROTO 256
static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]
__read_mostly;
/* We keep an extra hash for each conntrack, for fast searching. */
static inline unsigned int
-hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple)
+hash_by_src(const struct nf_conntrack_tuple *tuple)
{
unsigned int hash;
hash = jhash_3words((__force u32)tuple->src.u3.ip,
(__force u32)tuple->src.u.all,
tuple->dst.protonum, 0);
- return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
+ return ((u64)hash * nf_nat_htable_size) >> 32;
}
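
The ((u64)hash * size) >> 32 step maps a full 32-bit hash value into [0, size) without a modulo: the 64-bit product of a 32-bit value and size is strictly below size << 32, so its top 32 bits always land in range. A standalone sketch of the reduction:

#include <stdint.h>
#include <stdio.h>

/* Multiply-shift range reduction, as used above. */
static uint32_t reduce(uint32_t hash, uint32_t size)
{
	return (uint32_t)(((uint64_t)hash * size) >> 32);
}

int main(void)
{
	printf("%u\n", reduce(0x00000000u, 16384)); /* 0     */
	printf("%u\n", reduce(0x80000000u, 16384)); /* 8192  */
	printf("%u\n", reduce(0xffffffffu, 16384)); /* 16383 */
	return 0;
}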
/* Is this tuple already taken? (not by us) */
struct nf_conntrack_tuple *result,
const struct nf_nat_range *range)
{
- unsigned int h = hash_by_src(net, tuple);
+ unsigned int h = hash_by_src(tuple);
const struct nf_conn_nat *nat;
const struct nf_conn *ct;
const struct hlist_node *n;
if (have_to_hash) {
unsigned int srchash;
- srchash = hash_by_src(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+ srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
spin_lock_bh(&nf_nat_lock);
/* nf_conntrack_alter_reply might re-allocate extension area */
nat = nfct_nat(ct);
static int __net_init nf_nat_net_init(struct net *net)
{
- /* Leave them the same for the moment. */
- net->ipv4.nat_htable_size = net->ct.htable_size;
- net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size,
- &net->ipv4.nat_vmalloced, 0);
+ net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size,
+ &net->ipv4.nat_vmalloced, 0);
if (!net->ipv4.nat_bysource)
return -ENOMEM;
return 0;
nf_ct_iterate_cleanup(net, &clean_nat, NULL);
synchronize_rcu();
nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
- net->ipv4.nat_htable_size);
+ nf_nat_htable_size);
}
static struct pernet_operations nf_nat_net_ops = {
return ret;
}
+ /* Leave them the same for the moment. */
+ nf_nat_htable_size = nf_conntrack_htable_size;
+
ret = register_pernet_subsys(&nf_nat_net_ops);
if (ret < 0)
goto cleanup_extend;
return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev);
}
-static inline struct net *ipv6_skb_net(struct sk_buff *skb)
-{
- return skb_dst(skb) ? dev_net(skb_dst(skb)->dev) : dev_net(skb->dev);
-}
-
/* Router Alert as of RFC 2711 */
static int ipv6_hop_ra(struct sk_buff *skb, int optoff)
static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
{
const unsigned char *nh = skb_network_header(skb);
- struct net *net = ipv6_skb_net(skb);
u32 pkt_len;
+ struct net *net = dev_net(skb_dst(skb)->dev);
if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
if (t && !IS_ERR(t)) {
struct ip6t_getinfo info;
const struct xt_table_info *private = t->private;
-#ifdef CONFIG_COMPAT
- struct xt_table_info tmp;
+#ifdef CONFIG_COMPAT
if (compat) {
+ struct xt_table_info tmp;
ret = compat_table_info(private, &tmp);
xt_compat_flush_offsets(AF_INET6);
private = &tmp;
#include <net/ipv6.h>
#include <net/inet_frag.h>
-#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv6.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
return nf_conntrack_confirm(skb);
}
-static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
- struct sk_buff *skb)
-{
-#ifdef CONFIG_BRIDGE_NETFILTER
- if (skb->nf_bridge &&
- skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
- return IP6_DEFRAG_CONNTRACK_BRIDGE_IN;
-#endif
- if (hooknum == NF_INET_PRE_ROUTING)
- return IP6_DEFRAG_CONNTRACK_IN;
- else
- return IP6_DEFRAG_CONNTRACK_OUT;
-
-}
-
static unsigned int ipv6_defrag(unsigned int hooknum,
struct sk_buff *skb,
const struct net_device *in,
if (skb->nfct)
return NF_ACCEPT;
- reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
+ reasm = nf_ct_frag6_gather(skb);
+
/* queued */
if (reasm == NULL)
return NF_STOLEN;
/* Creation primitives. */
static __inline__ struct nf_ct_frag6_queue *
-fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst)
+fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
{
struct inet_frag_queue *q;
struct ip6_create_arg arg;
unsigned int hash;
arg.id = id;
- arg.user = user;
arg.src = src;
arg.dst = dst;
return 0;
}
-struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
+struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
{
struct sk_buff *clone;
struct net_device *dev = skb->dev;
if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh)
nf_ct_frag6_evictor();
- fq = fq_find(fhdr->identification, user, &hdr->saddr, &hdr->daddr);
+ fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr);
if (fq == NULL) {
pr_debug("Can't find and can't create new queue\n");
goto ret_orig;
struct inet_frag_queue q;
__be32 id; /* fragment id */
- u32 user;
struct in6_addr saddr;
struct in6_addr daddr;
struct ip6_create_arg *arg = a;
fq = container_of(q, struct frag_queue, q);
- return (fq->id == arg->id && fq->user == arg->user &&
+ return (fq->id == arg->id &&
ipv6_addr_equal(&fq->saddr, arg->src) &&
ipv6_addr_equal(&fq->daddr, arg->dst));
}
struct ip6_create_arg *arg = a;
fq->id = arg->id;
- fq->user = arg->user;
ipv6_addr_copy(&fq->saddr, arg->src);
ipv6_addr_copy(&fq->daddr, arg->dst);
}
unsigned int hash;
arg.id = id;
- arg.user = IP6_DEFRAG_LOCAL_DELIVER;
arg.src = src;
arg.dst = dst;
sinfo->rx_packets = sta->rx_packets;
sinfo->tx_packets = sta->tx_packets;
- if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) ||
- (sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) {
+ if (sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
sinfo->filled |= STATION_INFO_SIGNAL;
sinfo->signal = (s8)sta->last_signal;
}
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
struct ieee80211_conf *conf = &local->hw.conf;
- if (sdata->vif.type != NL80211_IFTYPE_STATION)
- return -EOPNOTSUPP;
-
if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
return -EOPNOTSUPP;
__entry->ret = ret;
__entry->action = action;
__entry->tid = tid;
- __entry->ssn = ssn ? *ssn : 0;
+ __entry->ssn = *ssn;
),
TP_printk(
ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT);
- if (time_before(jiffies, ifibss->last_scan_completed +
- IEEE80211_IBSS_MERGE_INTERVAL))
- return;
-
if (ieee80211_sta_active_ibss(sdata))
return;
}
if (pos[1] != 0 &&
(pos[1] != ifibss->ssid_len ||
- memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) {
+ !memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) {
/* Ignore ProbeReq for foreign SSID */
return;
}
unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
bool pspolling;
- bool scan_ps_enabled;
/*
* PS can only be enabled when we have exactly one managed
* interface (and monitors) in PS, this then points there.
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <net/mac80211.h>
-#include <net/ieee80211_radiotap.h>
#include "ieee80211_i.h"
#include "sta_info.h"
#include "debugfs_netdev.h"
#include "mesh.h"
#include "led.h"
#include "driver-ops.h"
-#include "wme.h"
/**
* DOC: Interface list locking
WARN_ON(flushed);
}
-static u16 ieee80211_netdev_select_queue(struct net_device *dev,
- struct sk_buff *skb)
-{
- return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
-}
-
static const struct net_device_ops ieee80211_dataif_ops = {
.ndo_open = ieee80211_open,
.ndo_stop = ieee80211_stop,
.ndo_set_multicast_list = ieee80211_set_multicast_list,
.ndo_change_mtu = ieee80211_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_select_queue = ieee80211_netdev_select_queue,
};
-static u16 ieee80211_monitor_select_queue(struct net_device *dev,
- struct sk_buff *skb)
-{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_hdr *hdr;
- struct ieee80211_radiotap_header *rtap = (void *)skb->data;
-
- if (local->hw.queues < 4)
- return 0;
-
- if (skb->len < 4 ||
- skb->len < le16_to_cpu(rtap->it_len) + 2 /* frame control */)
- return 0; /* doesn't matter, frame will be dropped */
-
- hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len));
-
- if (!ieee80211_is_data(hdr->frame_control)) {
- skb->priority = 7;
- return ieee802_1d_to_ac[skb->priority];
- }
-
- skb->priority = 0;
- return ieee80211_downgrade_queue(local, skb);
-}
-
static const struct net_device_ops ieee80211_monitorif_ops = {
.ndo_open = ieee80211_open,
.ndo_stop = ieee80211_stop,
.ndo_set_multicast_list = ieee80211_set_multicast_list,
.ndo_change_mtu = ieee80211_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_select_queue = ieee80211_monitor_select_queue,
};
static void ieee80211_if_setup(struct net_device *dev)
ASSERT_RTNL();
- ndev = alloc_netdev_mq(sizeof(*sdata) + local->hw.vif_data_size,
- name, ieee80211_if_setup, local->hw.queues);
+ ndev = alloc_netdev(sizeof(*sdata) + local->hw.vif_data_size,
+ name, ieee80211_if_setup);
if (!ndev)
return -ENOMEM;
dev_net_set(ndev, wiphy_net(local->hw.wiphy));
*/
#define MESH_PREQ_MIN_INT 10
#define MESH_DIAM_TRAVERSAL_TIME 50
-/* A path will be refreshed if it is used PATH_REFRESH_TIME milliseconds before
- * timing out. This way it will remain ACTIVE and no data frames will be
- * unnecessarily held in the pending queue.
+/* Paths will be refreshed if they are closer than PATH_REFRESH_TIME to their
+ * expiration
*/
#define MESH_PATH_REFRESH_TIME 1000
#define MESH_MIN_DISCOVERY_TIMEOUT (2 * MESH_DIAM_TRAVERSAL_TIME)
}
if (mpath->flags & MESH_PATH_ACTIVE) {
- if (time_after(jiffies, mpath->exp_time -
+ if (time_after(jiffies, mpath->exp_time +
msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time))
&& !memcmp(sdata->dev->dev_addr, hdr->addr4,
ETH_ALEN)
sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
IEEE80211_STA_BEACON_POLL);
- /*
- * Always handle WMM once after association regardless
- * of the first value the AP uses. Setting -1 here has
- * that effect because the AP values is an unsigned
- * 4-bit value.
- */
- sdata->u.mgd.wmm_last_param_set = -1;
-
ieee80211_led_assoc(local, 1);
sdata->vif.bss_conf.assoc = 1;
rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len);
break;
case IEEE80211_STYPE_ACTION:
- if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
- break;
-
+ /* XXX: differentiate, can only happen for CSA now! */
ieee80211_sta_process_chanswitch(sdata,
&mgmt->u.action.u.chan_switch.sw_elem,
ifmgd->associated);
mpp_path_add(mesh_hdr->eaddr2, hdr->addr4, sdata);
} else {
spin_lock_bh(&mppath->state_lock);
+ mppath->exp_time = jiffies;
if (compare_ether_addr(mppath->mpp, hdr->addr4) != 0)
memcpy(mppath->mpp, hdr->addr4, ETH_ALEN);
spin_unlock_bh(&mppath->state_lock);
memset(info, 0, sizeof(*info));
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
info->control.vif = &rx->sdata->vif;
- skb_set_queue_mapping(skb,
- ieee80211_select_queue(rx->sdata, fwd_skb));
- ieee80211_set_qos_hdr(local, skb);
+ ieee80211_select_queue(local, fwd_skb);
if (is_multicast_ether_addr(fwd_hdr->addr1))
IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
fwded_mcast);
}
break;
default:
- /* do not process rejected action frames */
- if (mgmt->u.action.category & 0x80)
- return RX_DROP_MONITOR;
-
return RX_CONTINUE;
}
static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
-
- local->scan_ps_enabled = false;
+ bool ps = false;
/* FIXME: what to do when local->pspolling is true? */
cancel_work_sync(&local->dynamic_ps_enable_work);
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
- local->scan_ps_enabled = true;
+ ps = true;
local->hw.conf.flags &= ~IEEE80211_CONF_PS;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
}
- if (!(local->scan_ps_enabled) ||
- !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
+ if (!ps || !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
/*
* If power save was enabled, no need to send a nullfunc
* frame because AP knows that we are sleeping. But if the
if (!local->ps_sdata)
ieee80211_send_nullfunc(local, sdata, 0);
- else if (local->scan_ps_enabled) {
+ else {
/*
* In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
* will send a nullfunc frame with the powersave bit set
*/
local->hw.conf.flags |= IEEE80211_CONF_PS;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
- } else if (local->hw.conf.dynamic_ps_timeout > 0) {
- /*
- * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer
- * had been running before leaving the operating channel,
- * restart the timer now and send a nullfunc frame to inform
- * the AP that we are awake.
- */
- ieee80211_send_nullfunc(local, sdata, 0);
- mod_timer(&local->dynamic_ps_timer, jiffies +
- msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
}
}
mutex_lock(&local->scan_mtx);
- /*
- * It's ok to abort a not-yet-running scan (that
- * we have one at all will be verified by checking
- * local->scan_req next), but not to complete it
- * successfully.
- */
- if (WARN_ON(!local->scanning && !aborted))
- aborted = true;
+ if (WARN_ON(!local->scanning)) {
+ mutex_unlock(&local->scan_mtx);
+ return;
+ }
if (WARN_ON(!local->scan_req)) {
mutex_unlock(&local->scan_mtx);
if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
local->hw.conf.dynamic_ps_timeout > 0 &&
- !local->quiescing &&
!(local->scanning) && local->ps_sdata) {
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
ieee80211_stop_queues_by_reason(&local->hw,
return;
}
- ieee80211_set_qos_hdr(local, skb);
+ ieee80211_select_queue(local, skb);
ieee80211_tx(sdata, skb, false);
dev_put(sdata->dev);
}
if (!encrypt)
info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
- /* send all internal mgmt frames on VO */
- skb_set_queue_mapping(skb, 0);
-
/*
* The other path calling ieee80211_xmit is from the tasklet,
* and while we can handle concurrent transmissions locking
enum queue_stop_reason reason)
{
struct ieee80211_local *local = hw_to_local(hw);
- struct ieee80211_sub_if_data *sdata;
if (WARN_ON(queue >= hw->queues))
return;
if (!skb_queue_empty(&local->pending[queue]))
tasklet_schedule(&local->tx_pending_tasklet);
-
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list)
- netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
- rcu_read_unlock();
}
void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
enum queue_stop_reason reason)
{
struct ieee80211_local *local = hw_to_local(hw);
- struct ieee80211_sub_if_data *sdata;
if (WARN_ON(queue >= hw->queues))
return;
__set_bit(reason, &local->queue_stop_reasons[queue]);
-
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list)
- netif_tx_stop_queue(netdev_get_tx_queue(sdata->dev, queue));
- rcu_read_unlock();
}
void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
if (elen > left)
break;
- if (calc_crc && id < 64 && (filter & (1ULL << id)))
+ if (calc_crc && id < 64 && (filter & BIT(id)))
crc = crc32_be(crc, pos - 2, elen + 2);
switch (id) {
/* restart hardware */
if (local->open_count) {
- /*
- * Upon resume hardware can sometimes be goofy due to
- * various platform / driver / bus issues, so restarting
- * the device may at times not work immediately. Propagate
- * the error.
- */
res = drv_start(local);
- if (res) {
- WARN(local->suspended, "Harware became unavailable "
- "upon resume. This is could be a software issue"
- "prior to suspend or a harware issue\n");
- return res;
- }
ieee80211_led_radio(local, true);
}
}
-/* Indicate which queue to use. */
-u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb)
+/* Indicate which queue to use. */
+static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb)
{
- struct ieee80211_local *local = sdata->local;
- struct sta_info *sta = NULL;
- u32 sta_flags = 0;
- const u8 *ra = NULL;
- bool qos = false;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- if (local->hw.queues < 4 || skb->len < 6) {
- skb->priority = 0; /* required for correct WPA/11i MIC */
- return min_t(u16, local->hw.queues - 1,
- ieee802_1d_to_ac[skb->priority]);
- }
-
- rcu_read_lock();
- switch (sdata->vif.type) {
- case NL80211_IFTYPE_AP_VLAN:
- case NL80211_IFTYPE_AP:
- ra = skb->data;
- break;
- case NL80211_IFTYPE_WDS:
- ra = sdata->u.wds.remote_addr;
- break;
-#ifdef CONFIG_MAC80211_MESH
- case NL80211_IFTYPE_MESH_POINT:
- /*
- * XXX: This is clearly broken ... but already was before,
- * because ieee80211_fill_mesh_addresses() would clear A1
- * except for multicast addresses.
- */
- break;
-#endif
- case NL80211_IFTYPE_STATION:
- ra = sdata->u.mgd.bssid;
- break;
- case NL80211_IFTYPE_ADHOC:
- ra = skb->data;
- break;
- default:
- break;
+ if (!ieee80211_is_data(hdr->frame_control)) {
+ /* management frames go on AC_VO queue, but are sent
+ * without QoS control fields */
+ return 0;
}
- if (!sta && ra && !is_multicast_ether_addr(ra)) {
- sta = sta_info_get(local, ra);
- if (sta)
- sta_flags = get_sta_flags(sta);
+ if (0 /* injected */) {
+ /* use AC from radiotap */
}
- if (sta_flags & WLAN_STA_WME)
- qos = true;
-
- rcu_read_unlock();
-
- if (!qos) {
+ if (!ieee80211_is_data_qos(hdr->frame_control)) {
skb->priority = 0; /* required for correct WPA/11i MIC */
return ieee802_1d_to_ac[skb->priority];
}
* data frame has */
skb->priority = cfg80211_classify8021d(skb);
- return ieee80211_downgrade_queue(local, skb);
-}
-
-u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
- struct sk_buff *skb)
-{
/* in case we are a client verify acm is not set for this ac */
while (unlikely(local->wmm_acm & BIT(skb->priority))) {
if (wme_downgrade_ac(skb)) {
return ieee802_1d_to_ac[skb->priority];
}
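
For reference, ieee802_1d_to_ac is a fixed eight-entry table indexed by the 802.1D tag that the code above keeps downgrading through. The values below follow the usual WMM mapping (0=VO, 1=VI, 2=BE, 3=BK) and are shown only as an illustration; the authoritative table lives in the mac80211 source:

#include <stdio.h>

enum { AC_VO = 0, AC_VI = 1, AC_BE = 2, AC_BK = 3 };

/* Illustrative 802.1D tag -> access category mapping. */
static const int tag_to_ac[8] = {
	AC_BE,	/* 0: best effort  */
	AC_BK,	/* 1: background   */
	AC_BK,	/* 2: background   */
	AC_BE,	/* 3: best effort  */
	AC_VI,	/* 4: video        */
	AC_VI,	/* 5: video        */
	AC_VO,	/* 6: voice        */
	AC_VO,	/* 7: voice        */
};

int main(void)
{
	int tag;

	for (tag = 0; tag < 8; tag++)
		printf("802.1D tag %d -> AC %d\n", tag, tag_to_ac[tag]);
	return 0;
}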
-void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb)
+void ieee80211_select_queue(struct ieee80211_local *local, struct sk_buff *skb)
{
- struct ieee80211_hdr *hdr = (void *)skb->data;
-
- /* Fill in the QoS header if there is one. */
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ u16 queue;
+ u8 tid;
+
+ queue = classify80211(local, skb);
+ if (unlikely(queue >= local->hw.queues))
+ queue = local->hw.queues - 1;
+
+ /*
+ * Now we know the 1d priority, fill in the QoS header if
+ * there is one (and we haven't done this before).
+ */
if (ieee80211_is_data_qos(hdr->frame_control)) {
u8 *p = ieee80211_get_qos_ctl(hdr);
- u8 ack_policy = 0, tid;
-
+ u8 ack_policy = 0;
tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
-
if (unlikely(local->wifi_wme_noack_test))
ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
QOS_CONTROL_ACK_POLICY_SHIFT;
*p++ = ack_policy | tid;
*p = 0;
}
+
+ skb_set_queue_mapping(skb, queue);
}
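
The header fill above packs the 802.1D tag bits and the ack-policy bits into the first QoS control byte and zeroes the second. A hedged sketch with stand-in constants (the real mask and shift values come from the mac80211/ieee80211 headers, not from here):

#include <stdint.h>
#include <stdio.h>

#define TAG1D_MASK		0x07	/* stand-in for the 802.1D tag mask */
#define ACK_POLICY_NOACK	1
#define ACK_POLICY_SHIFT	5

static uint8_t build_qos_ctl(uint8_t priority, int noack)
{
	uint8_t tid = priority & TAG1D_MASK;
	uint8_t ack = noack ? ACK_POLICY_NOACK << ACK_POLICY_SHIFT : 0;

	return ack | tid;	/* second QoS control byte stays 0 */
}

int main(void)
{
	printf("0x%02x\n", build_qos_ctl(6, 0)); /* 0x06 */
	printf("0x%02x\n", build_qos_ctl(6, 1)); /* 0x26 */
	return 0;
}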
extern const int ieee802_1d_to_ac[8];
-u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb);
-void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb);
-u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
- struct sk_buff *skb);
-
+void ieee80211_select_queue(struct ieee80211_local *local,
+ struct sk_buff *skb);
#endif /* _WME_H */
if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr))))
return -EINVAL;
- memset(usvc, 0, sizeof(*usvc));
-
usvc->af = nla_get_u16(nla_af);
#ifdef CONFIG_IP_VS_IPV6
if (usvc->af != AF_INET && usvc->af != AF_INET6)
if (!(nla_addr && nla_port))
return -EINVAL;
- memset(udest, 0, sizeof(*udest));
-
nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr));
udest->port = nla_get_u16(nla_port);
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
-#include <linux/nsproxy.h>
#include <linux/rculist_nulls.h>
#include <net/netfilter/nf_conntrack.h>
struct nf_conn nf_conntrack_untracked __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
+static struct kmem_cache *nf_conntrack_cachep __read_mostly;
+
static int nf_conntrack_hash_rnd_initted;
static unsigned int nf_conntrack_hash_rnd;
return ((u64)h * size) >> 32;
}
-static inline u_int32_t hash_conntrack(const struct net *net,
- const struct nf_conntrack_tuple *tuple)
+static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
{
- return __hash_conntrack(tuple, net->ct.htable_size,
+ return __hash_conntrack(tuple, nf_conntrack_htable_size,
nf_conntrack_hash_rnd);
}
{
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
- unsigned int hash = hash_conntrack(net, tuple);
+ unsigned int hash = hash_conntrack(tuple);
/* Disable BHs the entire time since we normally need to disable them
* at least once for the stats anyway.
void nf_conntrack_hash_insert(struct nf_conn *ct)
{
- struct net *net = nf_ct_net(ct);
unsigned int hash, repl_hash;
- hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
- repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+ hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+ repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
__nf_conntrack_hash_insert(ct, hash, repl_hash);
}
if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
return NF_ACCEPT;
- hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
- repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+ hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+ repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
/* We're not in hash table, and we refuse to set up related
connections for unconfirmed conns. But packet copies and
struct net *net = nf_ct_net(ignored_conntrack);
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
- unsigned int hash = hash_conntrack(net, tuple);
+ unsigned int hash = hash_conntrack(tuple);
/* Disable BHs the entire time since we need to disable them at
* least once for the stats anyway.
int dropped = 0;
rcu_read_lock();
- for (i = 0; i < net->ct.htable_size; i++) {
+ for (i = 0; i < nf_conntrack_htable_size; i++) {
hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
hnnode) {
tmp = nf_ct_tuplehash_to_ctrack(h);
ct = NULL;
if (ct || cnt >= NF_CT_EVICTION_RANGE)
break;
-
- hash = (hash + 1) % net->ct.htable_size;
+ hash = (hash + 1) % nf_conntrack_htable_size;
}
rcu_read_unlock();
if (nf_conntrack_max &&
unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
- unsigned int hash = hash_conntrack(net, orig);
+ unsigned int hash = hash_conntrack(orig);
if (!early_drop(net, hash)) {
atomic_dec(&net->ct.count);
if (net_ratelimit())
* Do not use kmem_cache_zalloc(), as this cache uses
* SLAB_DESTROY_BY_RCU.
*/
- ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
+ ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
if (ct == NULL) {
pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
atomic_dec(&net->ct.count);
nf_ct_ext_destroy(ct);
atomic_dec(&net->ct.count);
nf_ct_ext_free(ct);
- kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
+ kmem_cache_free(nf_conntrack_cachep, ct);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);
struct hlist_nulls_node *n;
spin_lock_bh(&nf_conntrack_lock);
- for (; *bucket < net->ct.htable_size; (*bucket)++) {
+ for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
ct = nf_ct_tuplehash_to_ctrack(h);
if (iter(ct, data))
static void nf_conntrack_cleanup_init_net(void)
{
- /* wait until all references to nf_conntrack_untracked are dropped */
- while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
- schedule();
-
nf_conntrack_helper_fini();
nf_conntrack_proto_fini();
+ kmem_cache_destroy(nf_conntrack_cachep);
}
static void nf_conntrack_cleanup_net(struct net *net)
schedule();
goto i_see_dead_people;
}
+ /* wait until all references to nf_conntrack_untracked are dropped */
+ while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
+ schedule();
nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
- net->ct.htable_size);
+ nf_conntrack_htable_size);
nf_conntrack_ecache_fini(net);
nf_conntrack_acct_fini(net);
nf_conntrack_expect_fini(net);
- kmem_cache_destroy(net->ct.nf_conntrack_cachep);
- kfree(net->ct.slabname);
free_percpu(net->ct.stat);
}
{
int i, bucket, vmalloced, old_vmalloced;
unsigned int hashsize, old_size;
+ int rnd;
struct hlist_nulls_head *hash, *old_hash;
struct nf_conntrack_tuple_hash *h;
- if (current->nsproxy->net_ns != &init_net)
- return -EOPNOTSUPP;
-
/* On boot, we can set this without any fancy locking. */
if (!nf_conntrack_htable_size)
return param_set_uint(val, kp);
if (!hash)
return -ENOMEM;
+ /* We have to rehash for the new table anyway, so we also can
+ * use a new random seed */
+ get_random_bytes(&rnd, sizeof(rnd));
+
/* Lookups in the old hash might happen in parallel, which means we
* might get false negatives during connection lookup. New connections
* created because of a false negative won't make it into the hash
* though since that required taking the lock.
*/
spin_lock_bh(&nf_conntrack_lock);
- for (i = 0; i < init_net.ct.htable_size; i++) {
+ for (i = 0; i < nf_conntrack_htable_size; i++) {
while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
h = hlist_nulls_entry(init_net.ct.hash[i].first,
struct nf_conntrack_tuple_hash, hnnode);
hlist_nulls_del_rcu(&h->hnnode);
- bucket = __hash_conntrack(&h->tuple, hashsize,
- nf_conntrack_hash_rnd);
+ bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
}
}
- old_size = init_net.ct.htable_size;
+ old_size = nf_conntrack_htable_size;
old_vmalloced = init_net.ct.hash_vmalloc;
old_hash = init_net.ct.hash;
- init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
+ nf_conntrack_htable_size = hashsize;
init_net.ct.hash_vmalloc = vmalloced;
init_net.ct.hash = hash;
+ nf_conntrack_hash_rnd = rnd;
spin_unlock_bh(&nf_conntrack_lock);
nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
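
The resize path above drains every old bucket, rehashes each entry into the new table with the new seed, then publishes the new table and frees the old one, all under nf_conntrack_lock. A toy userspace model of the same move-and-swap scheme (no locking, hypothetical helpers):

#include <stdio.h>
#include <stdlib.h>

struct node { unsigned int key; struct node *next; };

static unsigned int bucket_of(unsigned int key, unsigned int size)
{
	return (unsigned int)(((unsigned long long)key * size) >> 32);
}

/* Drain old buckets, rehash into the new table, free the old one. */
static struct node **rehash(struct node **old, unsigned int old_size,
			    unsigned int new_size)
{
	struct node **new_tab = calloc(new_size, sizeof(*new_tab));
	unsigned int i;

	if (!new_tab)
		return NULL;
	for (i = 0; i < old_size; i++) {
		while (old[i]) {
			struct node *n = old[i];
			unsigned int b = bucket_of(n->key, new_size);

			old[i] = n->next;	/* unlink from old bucket */
			n->next = new_tab[b];	/* push onto new bucket  */
			new_tab[b] = n;
		}
	}
	free(old);
	return new_tab;
}

int main(void)
{
	struct node **tab = calloc(4, sizeof(*tab));
	struct node n = { 0x80000000u, NULL };

	tab[bucket_of(n.key, 4)] = &n;
	tab = rehash(tab, 4, 8);
	printf("bucket %u\n", bucket_of(n.key, 8)); /* 4 */
	free(tab);
	return 0;
}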
NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
nf_conntrack_max);
+ nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
+ sizeof(struct nf_conn),
+ 0, SLAB_DESTROY_BY_RCU, NULL);
+ if (!nf_conntrack_cachep) {
+ printk(KERN_ERR "Unable to create nf_conn slab cache\n");
+ ret = -ENOMEM;
+ goto err_cache;
+ }
+
ret = nf_conntrack_proto_init();
if (ret < 0)
goto err_proto;
if (ret < 0)
goto err_helper;
- /* Set up fake conntrack: to never be deleted, not in any hashes */
-#ifdef CONFIG_NET_NS
- nf_conntrack_untracked.ct_net = &init_net;
-#endif
- atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
- /* - and make it look like a confirmed connection */
- set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
-
return 0;
err_helper:
nf_conntrack_proto_fini();
err_proto:
+ kmem_cache_destroy(nf_conntrack_cachep);
+err_cache:
return ret;
}
ret = -ENOMEM;
goto err_stat;
}
-
- net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
- if (!net->ct.slabname) {
- ret = -ENOMEM;
- goto err_slabname;
- }
-
- net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
- sizeof(struct nf_conn), 0,
- SLAB_DESTROY_BY_RCU, NULL);
- if (!net->ct.nf_conntrack_cachep) {
- printk(KERN_ERR "Unable to create nf_conn slab cache\n");
- ret = -ENOMEM;
- goto err_cache;
- }
-
- net->ct.htable_size = nf_conntrack_htable_size;
- net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size,
+ net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
&net->ct.hash_vmalloc, 1);
if (!net->ct.hash) {
ret = -ENOMEM;
if (ret < 0)
goto err_ecache;
+ /* Set up fake conntrack:
+ - to never be deleted, not in any hashes */
+#ifdef CONFIG_NET_NS
+ nf_conntrack_untracked.ct_net = &init_net;
+#endif
+ atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
+ /* - and make it look like a confirmed connection */
+ set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
+
return 0;
err_ecache:
nf_conntrack_expect_fini(net);
err_expect:
nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
- net->ct.htable_size);
+ nf_conntrack_htable_size);
err_hash:
- kmem_cache_destroy(net->ct.nf_conntrack_cachep);
-err_cache:
- kfree(net->ct.slabname);
-err_slabname:
free_percpu(net->ct.stat);
err_stat:
return ret;
#endif /* CONFIG_PROC_FS */
}
-module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);
+module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0600);
int nf_conntrack_expect_init(struct net *net)
{
if (net_eq(net, &init_net)) {
if (!nf_ct_expect_hsize) {
- nf_ct_expect_hsize = net->ct.htable_size / 256;
+ nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
if (!nf_ct_expect_hsize)
nf_ct_expect_hsize = 1;
}
struct nf_ct_ftp_master *info, int dir,
struct sk_buff *skb)
{
- unsigned int i, oldest;
+ unsigned int i, oldest = NUM_SEQ_TO_REMEMBER;
/* Look for oldest: if we find exact match, we're done. */
for (i = 0; i < info->seq_aft_nl_num[dir]; i++) {
if (info->seq_aft_nl[dir][i] == nl_seq)
return;
+
+ if (oldest == info->seq_aft_nl_num[dir] ||
+ before(info->seq_aft_nl[dir][i],
+ info->seq_aft_nl[dir][oldest]))
+ oldest = i;
}
if (info->seq_aft_nl_num[dir] < NUM_SEQ_TO_REMEMBER) {
info->seq_aft_nl[dir][info->seq_aft_nl_num[dir]++] = nl_seq;
- } else {
- if (before(info->seq_aft_nl[dir][0], info->seq_aft_nl[dir][1]))
- oldest = 0;
- else
- oldest = 1;
-
- if (after(nl_seq, info->seq_aft_nl[dir][oldest]))
- info->seq_aft_nl[dir][oldest] = nl_seq;
+ } else if (oldest != NUM_SEQ_TO_REMEMBER &&
+ after(nl_seq, info->seq_aft_nl[dir][oldest])) {
+ info->seq_aft_nl[dir][oldest] = nl_seq;
}
}
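
The before()/after() helpers used above are the wraparound-safe sequence comparisons (signed difference of unsigned 32-bit values), so a sequence number that recently wrapped past zero still compares as newer. A minimal sketch:

#include <stdint.h>
#include <stdio.h>

/* a is "before" b if the signed difference is negative. */
static int seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	printf("%d\n", seq_before(1, 2));		/* 1 */
	printf("%d\n", seq_before(0xfffffff0u, 0x10));	/* 1, wrapped */
	printf("%d\n", seq_before(0x10, 0xfffffff0u));	/* 0 */
	return 0;
}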
/* Get rid of expecteds, set helpers to NULL. */
hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode)
unhelp(h, me);
- for (i = 0; i < net->ct.htable_size; i++) {
+ for (i = 0; i < nf_conntrack_htable_size; i++) {
hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
unhelp(h, me);
}
rcu_read_lock();
last = (struct nf_conn *)cb->args[1];
- for (; cb->args[0] < init_net.ct.htable_size; cb->args[0]++) {
+ for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
restart:
hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]],
hnnode) {
struct hlist_nulls_node *n;
for (st->bucket = 0;
- st->bucket < net->ct.htable_size;
+ st->bucket < nf_conntrack_htable_size;
st->bucket++) {
n = rcu_dereference(net->ct.hash[st->bucket].first);
if (!is_a_nulls(n))
head = rcu_dereference(head->next);
while (is_a_nulls(head)) {
if (likely(get_nulls_value(head) == st->bucket)) {
- if (++st->bucket >= net->ct.htable_size)
+ if (++st->bucket >= nf_conntrack_htable_size)
return NULL;
}
head = rcu_dereference(net->ct.hash[st->bucket].first);
{
.ctl_name = NET_NF_CONNTRACK_BUCKETS,
.procname = "nf_conntrack_buckets",
- .data = &init_net.ct.htable_size,
+ .data = &nf_conntrack_htable_size,
.maxlen = sizeof(unsigned int),
.mode = 0444,
.proc_handler = proc_dointvec,
goto out_kmemdup;
table[1].data = &net->ct.count;
- table[2].data = &net->ct.htable_size;
table[3].data = &net->ct.sysctl_checksum;
table[4].data = &net->ct.sysctl_log_invalid;
}
static bool
-conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par,
- u16 state_mask, u16 status_mask)
+conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par)
{
const struct xt_conntrack_mtinfo2 *info = par->matchinfo;
enum ip_conntrack_info ctinfo;
if (test_bit(IPS_DST_NAT_BIT, &ct->status))
statebit |= XT_CONNTRACK_STATE_DNAT;
}
- if (!!(state_mask & statebit) ^
+ if (!!(info->state_mask & statebit) ^
!(info->invert_flags & XT_CONNTRACK_STATE))
return false;
}
return false;
if ((info->match_flags & XT_CONNTRACK_STATUS) &&
- (!!(status_mask & ct->status) ^
+ (!!(info->status_mask & ct->status) ^
!(info->invert_flags & XT_CONNTRACK_STATUS)))
return false;
static bool
conntrack_mt_v1(const struct sk_buff *skb, const struct xt_match_param *par)
{
- const struct xt_conntrack_mtinfo1 *info = par->matchinfo;
+ const struct xt_conntrack_mtinfo2 *const *info = par->matchinfo;
+ struct xt_match_param newpar = *par;
- return conntrack_mt(skb, par, info->state_mask, info->status_mask);
-}
-
-static bool
-conntrack_mt_v2(const struct sk_buff *skb, const struct xt_match_param *par)
-{
- const struct xt_conntrack_mtinfo2 *info = par->matchinfo;
-
- return conntrack_mt(skb, par, info->state_mask, info->status_mask);
+ newpar.matchinfo = *info;
+ return conntrack_mt(skb, &newpar);
}
static bool conntrack_mt_check(const struct xt_mtchk_param *par)
return true;
}
+static bool conntrack_mt_check_v1(const struct xt_mtchk_param *par)
+{
+ struct xt_conntrack_mtinfo1 *info = par->matchinfo;
+ struct xt_conntrack_mtinfo2 *up;
+ int ret = conntrack_mt_check(par);
+
+ if (ret < 0)
+ return ret;
+
+ up = kmalloc(sizeof(*up), GFP_KERNEL);
+ if (up == NULL) {
+ nf_ct_l3proto_module_put(par->family);
+ return -ENOMEM;
+ }
+
+ /*
+ * The strategy here is to minimize the overhead of v1 matching,
+ * by prebuilding a v2 struct and putting the pointer into the
+ * v1 dataspace.
+ */
+ memcpy(up, info, offsetof(typeof(*info), state_mask));
+ up->state_mask = info->state_mask;
+ up->status_mask = info->status_mask;
+ *(void **)info = up;
+ return true;
+}
+
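
The strategy spelled out in the comment above, converting the v1 blob into a v2 struct once at checkentry time and stashing the pointer in the v1 dataspace so the hot path only ever sees one format, looks like this in miniature (toy structs and userspace allocation, not the xtables API):

#include <stdio.h>
#include <stdlib.h>

/* Old blob: 16-bit masks.  New blob: 32-bit masks.  The old blob
 * must be at least pointer-sized so a pointer fits in its place. */
struct old_info { int flags; short state_mask, status_mask; };
struct new_info { int flags; int state_mask, status_mask; };

static int setup_v1(void *matchinfo)
{
	struct old_info *info = matchinfo;
	struct new_info *up = malloc(sizeof(*up));

	if (!up)
		return -1;
	up->flags = info->flags;
	up->state_mask = info->state_mask;
	up->status_mask = info->status_mask;
	*(void **)matchinfo = up;	/* hot path reads only this */
	return 0;
}

static void teardown_v1(void *matchinfo)
{
	free(*(void **)matchinfo);
}

int main(void)
{
	struct old_info info = { 7, 3, 1 };

	if (setup_v1(&info))
		return 1;
	printf("state_mask=%d\n",
	       (*(struct new_info **)(void *)&info)->state_mask); /* 3 */
	teardown_v1(&info);
	return 0;
}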
static void conntrack_mt_destroy(const struct xt_mtdtor_param *par)
{
nf_ct_l3proto_module_put(par->family);
}
+static void conntrack_mt_destroy_v1(const struct xt_mtdtor_param *par)
+{
+ struct xt_conntrack_mtinfo2 **info = par->matchinfo;
+ kfree(*info);
+ conntrack_mt_destroy(par);
+}
+
static struct xt_match conntrack_mt_reg[] __read_mostly = {
{
.name = "conntrack",
.family = NFPROTO_UNSPEC,
.matchsize = sizeof(struct xt_conntrack_mtinfo1),
.match = conntrack_mt_v1,
- .checkentry = conntrack_mt_check,
- .destroy = conntrack_mt_destroy,
+ .checkentry = conntrack_mt_check_v1,
+ .destroy = conntrack_mt_destroy_v1,
.me = THIS_MODULE,
},
{
.revision = 2,
.family = NFPROTO_UNSPEC,
.matchsize = sizeof(struct xt_conntrack_mtinfo2),
- .match = conntrack_mt_v2,
+ .match = conntrack_mt,
.checkentry = conntrack_mt_check,
.destroy = conntrack_mt_destroy,
.me = THIS_MODULE,
dptr = skb_push(skb, 1);
*dptr = AX25_P_NETROM;
- ax25s = nr_neigh->ax25;
- nr_neigh->ax25 = ax25_send_frame(skb, 256,
- (ax25_address *)dev->dev_addr,
- &nr_neigh->callsign,
- nr_neigh->digipeat, nr_neigh->dev);
- if (ax25s)
+ ax25s = ax25_send_frame(skb, 256, (ax25_address *)dev->dev_addr, &nr_neigh->callsign, nr_neigh->digipeat, nr_neigh->dev);
+ if (nr_neigh->ax25 && ax25s) {
+ /* We were already holding this ax25_cb */
ax25_cb_put(ax25s);
+ }
+ nr_neigh->ax25 = ax25s;
dev_put(dev);
ret = (nr_neigh->ax25 != NULL);
status = TP_STATUS_SEND_REQUEST;
err = dev_queue_xmit(skb);
- if (unlikely(err > 0)) {
- err = net_xmit_errno(err);
- if (err && __packet_get_status(po, ph) ==
- TP_STATUS_AVAILABLE) {
- /* skb was destructed already */
- skb = NULL;
- goto out_status;
- }
- /*
- * skb was dropped but not destructed yet;
- * let's treat it like congestion or err < 0
- */
- err = 0;
- }
+ if (unlikely(err > 0 && (err = net_xmit_errno(err)) != 0))
+ goto out_xmit;
packet_increment_head(&po->tx_ring);
len_sum += tp_len;
} while (likely((ph != NULL) || ((!(msg->msg_flags & MSG_DONTWAIT))
err = len_sum;
goto out_put;
+out_xmit:
+ skb->destructor = sock_wfree;
+ atomic_dec(&po->tx_ring.pending);
out_status:
__packet_set_status(po, ph, status);
kfree_skb(skb);
static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh)
{
ax25_address *rose_call;
- ax25_cb *ax25s;
if (ax25cmp(&rose_callsign, &null_ax25_address) == 0)
rose_call = (ax25_address *)neigh->dev->dev_addr;
else
rose_call = &rose_callsign;
- ax25s = neigh->ax25;
neigh->ax25 = ax25_send_frame(skb, 260, rose_call, &neigh->callsign, neigh->digipeat, neigh->dev);
- if (ax25s)
- ax25_cb_put(ax25s);
return (neigh->ax25 != NULL);
}
static int rose_link_up(struct rose_neigh *neigh)
{
ax25_address *rose_call;
- ax25_cb *ax25s;
if (ax25cmp(&rose_callsign, &null_ax25_address) == 0)
rose_call = (ax25_address *)neigh->dev->dev_addr;
else
rose_call = &rose_callsign;
- ax25s = neigh->ax25;
neigh->ax25 = ax25_find_cb(rose_call, &neigh->callsign, neigh->digipeat, neigh->dev);
- if (ax25s)
- ax25_cb_put(ax25s);
return (neigh->ax25 != NULL);
}
if ((s = rose_neigh_list) == rose_neigh) {
rose_neigh_list = rose_neigh->next;
- if (rose_neigh->ax25)
- ax25_cb_put(rose_neigh->ax25);
kfree(rose_neigh->digipeat);
kfree(rose_neigh);
return;
while (s != NULL && s->next != NULL) {
if (s->next == rose_neigh) {
s->next = rose_neigh->next;
- if (rose_neigh->ax25)
- ax25_cb_put(rose_neigh->ax25);
kfree(rose_neigh->digipeat);
kfree(rose_neigh);
return;
if (rose_neigh != NULL) {
rose_neigh->ax25 = NULL;
- ax25_cb_put(ax25);
rose_del_route_by_neigh(rose_neigh);
rose_kill_by_neigh(rose_neigh);
dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid,
cred->cr_uid);
gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred);
- if (PTR_ERR(gss_msg) == -EAGAIN) {
+ if (IS_ERR(gss_msg) == -EAGAIN) {
/* XXX: warning on the first, under the assumption we
* shouldn't normally hit this case on a refresh. */
warn_gssd();
p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
if (IS_ERR(p)) {
err = PTR_ERR(p);
- switch (err) {
- case -EACCES:
- gss_msg->msg.errno = err;
- err = mlen;
- break;
- case -EFAULT:
- case -ENOMEM:
- case -EINVAL:
- case -ENOSYS:
- gss_msg->msg.errno = -EAGAIN;
- break;
- default:
- printk(KERN_CRIT "%s: bad return from "
- "gss_fill_context: %ld\n", __func__, err);
- BUG();
- }
+ gss_msg->msg.errno = (err == -EAGAIN) ? -EAGAIN : -EACCES;
goto err_release_msg;
}
gss_msg->ctx = gss_get_ctx(ctx);
struct krb5_ctx *ctx;
int tmp;
- if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) {
- p = ERR_PTR(-ENOMEM);
+ if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS)))
goto out_err;
- }
p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
if (IS_ERR(p))
struct gss_ctx **ctx_id)
{
if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL)))
- return -ENOMEM;
+ return GSS_S_FAILURE;
(*ctx_id)->mech_type = gss_mech_get(mech);
return mech->gm_ops
spin_unlock_bh(&pool->sp_lock);
len = 0;
- if (test_bit(XPT_LISTENER, &xprt->xpt_flags) &&
- !test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
+ if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
struct svc_xprt *newxpt;
newxpt = xprt->xpt_ops->xpo_accept(xprt);
if (newxpt) {
}
}
- /*
- * We might be coming here because the driver reported
- * a successful association at the same time as the
- * user requested a deauth. In that case, we will have
- * removed the BSS from the auth_bsses list due to the
- * deauth request when the assoc response makes it. If
- * the two code paths acquire the lock the other way
- * around, that's just the standard situation of a
- * deauth being requested while connected.
- */
- if (!bss)
- goto out;
+ WARN_ON(!bss);
} else if (wdev->conn) {
cfg80211_sme_failed_assoc(wdev);
need_connect_result = false;
request->wiphy_idx = WIPHY_IDX_STALE;
request->alpha2[0] = alpha2[0];
request->alpha2[1] = alpha2[1];
- request->initiator = NL80211_REGDOM_SET_BY_USER;
+ request->initiator = NL80211_REGDOM_SET_BY_USER,
queue_regulatory_request(request);
memset(&wrqu, 0, sizeof(wrqu));
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
- wdev->wext.connect.ssid_len = 0;
#endif
}
subdir-$(CONFIG_SECURITY_TOMOYO) += tomoyo
# always enable default capabilities
-obj-y += commoncap.o
-obj-$(CONFIG_MMU) += min_addr.o
+obj-y += commoncap.o min_addr.o
# Object file lists
obj-$(CONFIG_SECURITY) += security.o capability.o
*/
long keyctl_session_to_parent(void)
{
-#ifdef TIF_NOTIFY_RESUME
struct task_struct *me, *parent;
const struct cred *mycred, *pcred;
struct cred *cred, *oldcred;
error_keyring:
key_ref_put(keyring_r);
return ret;
-
-#else /* !TIF_NOTIFY_RESUME */
- /*
- * To be removed when TIF_NOTIFY_RESUME has been implemented on
- * m68k/xtensa
- */
-#warning TIF_NOTIFY_RESUME not implemented
- return -EOPNOTSUPP;
-#endif /* !TIF_NOTIFY_RESUME */
}
/*****************************************************************************/
initrlim = init_task.signal->rlim + i;
rlim->rlim_cur = min(rlim->rlim_max, initrlim->rlim_cur);
}
- update_rlimit_cpu(current->signal->rlim[RLIMIT_CPU].rlim_cur);
+ update_rlimit_cpu(rlim->rlim_cur);
}
}
struct snd_hrtimer {
struct snd_timer *timer;
struct hrtimer hrt;
- atomic_t running;
};
static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt)
{
struct snd_hrtimer *stime = container_of(hrt, struct snd_hrtimer, hrt);
struct snd_timer *t = stime->timer;
-
- if (!atomic_read(&stime->running))
- return HRTIMER_NORESTART;
-
hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution));
snd_timer_interrupt(stime->timer, t->sticks);
-
- if (!atomic_read(&stime->running))
- return HRTIMER_NORESTART;
return HRTIMER_RESTART;
}
hrtimer_init(&stime->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
stime->timer = t;
stime->hrt.function = snd_hrtimer_callback;
- atomic_set(&stime->running, 0);
t->private_data = stime;
return 0;
}
{
struct snd_hrtimer *stime = t->private_data;
- atomic_set(&stime->running, 0);
- hrtimer_cancel(&stime->hrt);
hrtimer_start(&stime->hrt, ns_to_ktime(t->sticks * resolution),
HRTIMER_MODE_REL);
- atomic_set(&stime->running, 1);
return 0;
}
static int snd_hrtimer_stop(struct snd_timer *t)
{
struct snd_hrtimer *stime = t->private_data;
- atomic_set(&stime->running, 0);
+
+ hrtimer_cancel(&stime->hrt);
return 0;
}
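Both versions of snd_hrtimer_stop() appear above: the newer one only clears an atomic running flag, while the older one calls hrtimer_cancel() directly. The distinction matters because hrtimer_cancel() waits for an in-flight callback to complete and therefore must never be called from the callback path itself. A minimal kernel-style sketch of the start/cancel pairing, assuming a module build environment:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;

/* Runs in hrtimer interrupt context; re-arming with
 * hrtimer_forward_now() and returning HRTIMER_RESTART makes the
 * timer periodic. */
static enum hrtimer_restart demo_cb(struct hrtimer *hrt)
{
	hrtimer_forward_now(hrt, ns_to_ktime(1000000));	/* 1 ms period */
	return HRTIMER_RESTART;
}

static void demo_start(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_cb;
	hrtimer_start(&demo_timer, ns_to_ktime(1000000), HRTIMER_MODE_REL);
}

static void demo_stop(void)
{
	/* Blocks until any running callback has finished; safe here,
	 * but not from within demo_cb() itself. */
	hrtimer_cancel(&demo_timer);
}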
/* alloc virtual 'dma' area */
if (runtime->dma_area)
vfree(runtime->dma_area);
- runtime->dma_area = vmalloc_user(size);
+ runtime->dma_area = vmalloc(size);
if (runtime->dma_area == NULL)
return -ENOMEM;
runtime->dma_bytes = size;
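For readers weighing the two allocators in this hunk: vmalloc_user() returns zeroed pages marked as safe to map into user space (remap_vmalloc_range() rejects areas without that marking), whereas plain vmalloc() neither zeroes the memory nor flags it, so an mmap()ed buffer could expose stale kernel data. A minimal sketch of the grow-or-keep pattern used above; struct demo_runtime is a hypothetical stand-in for struct snd_pcm_runtime:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

struct demo_runtime {
	void	*dma_area;	/* intermediate buffer */
	size_t	dma_bytes;	/* its current size */
};

/* Returns 0 on success or -ENOMEM, mirroring the hunk above. */
static int demo_alloc_vmalloc_buffer(struct demo_runtime *rt, size_t size)
{
	if (rt->dma_area) {
		if (rt->dma_bytes >= size)
			return 0;	/* already large enough */
		vfree(rt->dma_area);
	}
	rt->dma_area = vmalloc_user(size);	/* zeroed, user-mappable */
	if (!rt->dma_area)
		return -ENOMEM;
	rt->dma_bytes = size;
	return 0;
}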
0x10140554, /* Thinkpad T42p/R50p */
0x10140567, /* Thinkpad T43p 2668-G7U */
0x10140581, /* Thinkpad X41-2527 */
- 0x10280160, /* Dell Dimension 2400 */
0x104380b0, /* Asus A7V8X-MX */
0x11790241, /* Toshiba Satellite A-15 S127 */
0x144dc01a, /* Samsung NP-X20C004/SEG */
MODULE_DEVICE_TABLE(pci, snd_atiixp_ids);
static struct snd_pci_quirk atiixp_quirks[] __devinitdata = {
- SND_PCI_QUIRK(0x105b, 0x0c81, "Foxconn RC4107MA-RS2", 0),
SND_PCI_QUIRK(0x15bd, 0x3100, "DFI RS482", 0),
{ } /* terminator */
};
static unsigned long atc_get_ptp_phys(struct ct_atc *atc, int index)
{
- return atc->vm->get_ptp_phys(atc->vm, index);
+ struct ct_vm *vm;
+ void *kvirt_addr;
+ unsigned long phys_addr;
+
+ vm = atc->vm;
+ kvirt_addr = vm->get_ptp_virt(vm, index);
+ if (kvirt_addr == NULL)
+ phys_addr = (~0UL);
+ else
+ phys_addr = virt_to_phys(kvirt_addr);
+
+ return phys_addr;
}
static unsigned int convert_format(snd_pcm_format_t snd_format)
}
/* Set up device virtual memory management object */
- err = ct_vm_create(&atc->vm, pci);
+ err = ct_vm_create(&atc->vm);
if (err < 0)
goto error1;
return NULL;
}
- ptp = (unsigned long *)vm->ptp[0].area;
+ ptp = vm->ptp[0];
pte_start = (block->addr >> CT_PAGE_SHIFT);
pages = block->size >> CT_PAGE_SHIFT;
for (i = 0; i < pages; i++) {
}
/* *
- * return the host physical addr of the @index-th device
- * page table page on success, or ~0UL on failure.
- * The first returned ~0UL indicates the termination.
+ * return the host (kmalloced) addr of the @index-th device
+ * page table page on success, or NULL on failure.
+ * The first returned NULL indicates termination.
* */
-static dma_addr_t
-ct_get_ptp_phys(struct ct_vm *vm, int index)
+static void *
+ct_get_ptp_virt(struct ct_vm *vm, int index)
{
- dma_addr_t addr;
+ void *addr;
- addr = (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr;
+ addr = (index >= CT_PTP_NUM) ? NULL : vm->ptp[index];
return addr;
}
-int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
+int ct_vm_create(struct ct_vm **rvm)
{
struct ct_vm *vm;
struct ct_vm_block *block;
- int i, err = 0;
+ int i;
*rvm = NULL;
/* Allocate page table pages */
for (i = 0; i < CT_PTP_NUM; i++) {
- err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(pci),
- PAGE_SIZE, &vm->ptp[i]);
- if (err < 0)
+ vm->ptp[i] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!vm->ptp[i])
break;
}
- if (err < 0) {
+ if (!i) {
/* no page table pages are allocated */
- ct_vm_destroy(vm);
+ kfree(vm);
return -ENOMEM;
}
vm->size = CT_ADDRS_PER_PAGE * i;
+ /* Initialise remaining ptps */
+ for (; i < CT_PTP_NUM; i++)
+ vm->ptp[i] = NULL;
+
vm->map = ct_vm_map;
vm->unmap = ct_vm_unmap;
- vm->get_ptp_phys = ct_get_ptp_phys;
+ vm->get_ptp_virt = ct_get_ptp_virt;
INIT_LIST_HEAD(&vm->unused);
INIT_LIST_HEAD(&vm->used);
block = kzalloc(sizeof(*block), GFP_KERNEL);
/* free allocated page table pages */
for (i = 0; i < CT_PTP_NUM; i++)
- snd_dma_free_pages(&vm->ptp[i]);
+ kfree(vm->ptp[i]);
vm->size = 0;
#include <linux/mutex.h>
#include <linux/list.h>
-#include <linux/pci.h>
-#include <sound/memalloc.h>
/* The chip can handle the page table of 4k pages
* (emu20k1 can handle even 8k pages, but we don't use it right now)
/* Virtual memory management object for card device */
struct ct_vm {
- struct snd_dma_buffer ptp[CT_PTP_NUM]; /* Device page table pages */
+ void *ptp[CT_PTP_NUM]; /* Device page table pages */
unsigned int size; /* Available addr space in bytes */
struct list_head unused; /* List of unused blocks */
struct list_head used; /* List of used blocks */
int size);
/* Unmap device logical addr area. */
void (*unmap)(struct ct_vm *, struct ct_vm_block *block);
- dma_addr_t (*get_ptp_phys)(struct ct_vm *vm, int index);
+ void *(*get_ptp_virt)(struct ct_vm *vm, int index);
};
-int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci);
+int ct_vm_create(struct ct_vm **rvm);
void ct_vm_destroy(struct ct_vm *vm);
#endif /* CTVMEM_H */
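The ctxfi hunks above swap DMA-coherent page-table pages (struct snd_dma_buffer) for plain kmalloc() memory whose physical address is recovered with virt_to_phys(). For contrast, a minimal sketch of the coherent-buffer side, which returns both the CPU pointer and the device-visible address in one call; the pci argument is assumed to be held by the caller:

#include <linux/pci.h>
#include <sound/memalloc.h>

/* Allocate one page-table page as a coherent DMA buffer.  On success
 * dmab->area is the CPU virtual address and dmab->addr the bus
 * address, so no virt_to_phys() translation is needed. */
static int demo_alloc_ptp(struct pci_dev *pci, struct snd_dma_buffer *dmab)
{
	return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
				   snd_dma_pci_data(pci),
				   PAGE_SIZE, dmab);
}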
if (!bdl_pos_adj[chip->dev_index])
return 1; /* no delayed ack */
- if (WARN_ONCE(!azx_dev->period_bytes,
- "hda-intel: zero azx_dev->period_bytes"))
- return 0; /* this shouldn't happen! */
if (pos % azx_dev->period_bytes > azx_dev->period_bytes / 2)
return 0; /* NG - it's below the period boundary */
return 1; /* OK, it's fine */
}
}
- /* disable 64bit DMA address for Teradici */
- /* it does not work with device 6549:1200 subsys e4a2:040b */
- if (chip->driver_type == AZX_DRIVER_TERA)
- gcap &= ~ICH6_GCAP_64OK;
-
/* allow 64bit DMA address if supported by H/W */
if ((gcap & ICH6_GCAP_64OK) && !pci_set_dma_mask(pci, DMA_BIT_MASK(64)))
pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(64));
{ PCI_DEVICE(0x10de, 0x0ac1), .driver_data = AZX_DRIVER_NVIDIA },
{ PCI_DEVICE(0x10de, 0x0ac2), .driver_data = AZX_DRIVER_NVIDIA },
{ PCI_DEVICE(0x10de, 0x0ac3), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x0be2), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x0be3), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x0be4), .driver_data = AZX_DRIVER_NVIDIA },
{ PCI_DEVICE(0x10de, 0x0d94), .driver_data = AZX_DRIVER_NVIDIA },
{ PCI_DEVICE(0x10de, 0x0d95), .driver_data = AZX_DRIVER_NVIDIA },
{ PCI_DEVICE(0x10de, 0x0d96), .driver_data = AZX_DRIVER_NVIDIA },
{ .id = 0x80862801, .name = "G45 DEVBLC", .patch = patch_intel_hdmi },
{ .id = 0x80862802, .name = "G45 DEVCTG", .patch = patch_intel_hdmi },
{ .id = 0x80862803, .name = "G45 DEVELK", .patch = patch_intel_hdmi },
- { .id = 0x80862804, .name = "G45 DEVIBX", .patch = patch_intel_hdmi_ibexpeak },
+ { .id = 0x80862804, .name = "G45 DEVIBX", .patch = patch_intel_hdmi },
{ .id = 0x80860054, .name = "Q57 DEVIBX", .patch = patch_intel_hdmi_ibexpeak },
{ .id = 0x10951392, .name = "SiI1392 HDMI", .patch = patch_intel_hdmi },
{} /* terminator */
"Speaker Playback Switch",
"Mono Playback Switch",
"IEC958 Playback Switch",
- "Line-Out Playback Switch",
- "PCM Playback Switch",
NULL,
};
HDA_BIND_MUTE ("Surround Playback Switch", 0x0d, 0x02, HDA_INPUT),
HDA_CODEC_VOLUME("LFE Playback Volume", 0x0e, 0x00, HDA_OUTPUT),
HDA_BIND_MUTE ("LFE Playback Switch", 0x0e, 0x02, HDA_INPUT),
- HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0f, 0x00, HDA_OUTPUT),
- HDA_BIND_MUTE ("Headphone Playback Switch", 0x0f, 0x02, HDA_INPUT),
+ HDA_CODEC_VOLUME("HP Playback Volume", 0x0f, 0x00, HDA_OUTPUT),
+ HDA_BIND_MUTE ("HP Playback Switch", 0x0f, 0x02, HDA_INPUT),
HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
HDA_CODEC_MUTE ("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
{0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
{0x14, AC_VERB_SET_CONNECT_SEL, 0x03},
- {0x14, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
/* Front Mic pin: input vref at 80% */
{0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
{0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
spec->autocfg.speaker_pins[0] = 0x14;
}
-static void alc885_mb5_automute(struct hda_codec *codec)
-{
- unsigned int present;
-
- present = snd_hda_codec_read(codec, 0x14, 0,
- AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
- snd_hda_codec_amp_stereo(codec, 0x18, HDA_OUTPUT, 0,
- HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
- snd_hda_codec_amp_stereo(codec, 0x1a, HDA_OUTPUT, 0,
- HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
-
-}
-
-static void alc885_mb5_unsol_event(struct hda_codec *codec,
- unsigned int res)
-{
- /* Headphone insertion or removal. */
- if ((res >> 26) == ALC880_HP_EVENT)
- alc885_mb5_automute(codec);
-}
-
static struct hda_verb alc882_targa_verbs[] = {
{0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
SND_PCI_QUIRK(0x1462, 0x040d, "MSI", ALC883_TARGA_2ch_DIG),
SND_PCI_QUIRK(0x1462, 0x0579, "MSI", ALC883_TARGA_2ch_DIG),
SND_PCI_QUIRK(0x1462, 0x28fb, "Targa T8", ALC882_TARGA), /* MSI-1049 T8 */
- SND_PCI_QUIRK(0x1462, 0x2fb3, "MSI", ALC882_AUTO),
+ SND_PCI_QUIRK(0x1462, 0x2fb3, "MSI", ALC883_TARGA_2ch_DIG),
SND_PCI_QUIRK(0x1462, 0x6668, "MSI", ALC882_6ST_DIG),
SND_PCI_QUIRK(0x1462, 0x3729, "MSI S420", ALC883_TARGA_DIG),
SND_PCI_QUIRK(0x1462, 0x3783, "NEC S970", ALC883_TARGA_DIG),
.input_mux = &mb5_capture_source,
.dig_out_nid = ALC882_DIGOUT_NID,
.dig_in_nid = ALC882_DIGIN_NID,
- .unsol_event = alc885_mb5_unsol_event,
- .init_hook = alc885_mb5_automute,
},
[ALC885_MACPRO] = {
.mixers = { alc882_macpro_mixer },
.dac_nids = alc883_dac_nids,
.num_adc_nids = ARRAY_SIZE(alc889_adc_nids),
.adc_nids = alc889_adc_nids,
- .capsrc_nids = alc889_capsrc_nids,
.dig_out_nid = ALC883_DIGOUT_NID,
.dig_in_nid = ALC883_DIGIN_NID,
.slave_dig_outs = alc883_slave_dig_outs,
.dac_nids = alc883_dac_nids,
.adc_nids = alc883_adc_nids_alt,
.num_adc_nids = ARRAY_SIZE(alc883_adc_nids_alt),
- .capsrc_nids = alc883_capsrc_nids,
.dig_out_nid = ALC883_DIGOUT_NID,
.num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
.channel_mode = alc883_3ST_2ch_modes,
.dac_nids = alc883_dac_nids,
.adc_nids = alc883_adc_nids_alt,
.num_adc_nids = ARRAY_SIZE(alc883_adc_nids_alt),
- .capsrc_nids = alc883_capsrc_nids,
.num_channel_mode = ARRAY_SIZE(alc883_sixstack_modes),
.channel_mode = alc883_sixstack_modes,
.input_mux = &alc883_capture_source,
.dac_nids = alc883_dac_nids,
.adc_nids = alc883_adc_nids_alt,
.num_adc_nids = ARRAY_SIZE(alc883_adc_nids_alt),
- .capsrc_nids = alc883_capsrc_nids,
.num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
.channel_mode = alc883_3ST_2ch_modes,
.input_mux = &alc883_lenovo_101e_capture_source,
alc880_gpio1_init_verbs },
.adc_nids = alc883_adc_nids,
.num_adc_nids = ARRAY_SIZE(alc883_adc_nids),
- .capsrc_nids = alc883_capsrc_nids,
.dac_nids = alc883_dac_nids,
.num_dacs = ARRAY_SIZE(alc883_dac_nids),
.channel_mode = alc889A_mb31_6ch_modes,
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x15;
- spec->autocfg.speaker_pins[0] = 0x14;
+ spec->autocfg.speaker_pins[0] = 0x0c; /* HACK: not actually a pin */
}
static struct snd_kcontrol_new alc262_hp_t5735_mixer[] = {
.num_channel_mode = ARRAY_SIZE(alc262_modes),
.channel_mode = alc262_modes,
.input_mux = &alc262_capture_source,
- .unsol_event = alc_sku_unsol_event,
+ .unsol_event = alc_automute_amp_unsol_event,
.setup = alc262_hp_t5735_setup,
- .init_hook = alc_inithook,
+ .init_hook = alc_automute_amp,
},
[ALC262_HP_RP5700] = {
.mixers = { alc262_hp_rp5700_mixer },
spec->stream_digital_playback = &alc861_pcm_digital_playback;
spec->stream_digital_capture = &alc861_pcm_digital_capture;
- if (!spec->cap_mixer)
- set_capture_mixer(codec);
set_beep_amp(spec, 0x23, 0, HDA_OUTPUT);
spec->vmaster_nid = 0x03;
static int alc861vd_auto_create_input_ctls(struct hda_codec *codec,
const struct auto_pin_cfg *cfg)
{
- return alc_auto_create_input_ctls(codec, cfg, 0x15, 0x22, 0);
+ return alc_auto_create_input_ctls(codec, cfg, 0x15, 0x09, 0);
}
return 0;
}
-/*
- * suspend/resume
- * */
-
-#ifdef CONFIG_PM
-static int juli_resume(struct snd_ice1712 *ice)
-{
- struct snd_akm4xxx *ak = ice->akm;
- struct juli_spec *spec = ice->spec;
- /* akm4358 un-reset, un-mute */
- snd_akm4xxx_reset(ak, 0);
- /* reinit ak4114 */
- snd_ak4114_reinit(spec->ak4114);
- return 0;
-}
-
-static int juli_suspend(struct snd_ice1712 *ice)
-{
- struct snd_akm4xxx *ak = ice->akm;
- /* akm4358 reset and soft-mute */
- snd_akm4xxx_reset(ak, 1);
- return 0;
-}
-#endif
-
/*
* initialize the chip
*/
ice->set_spdif_clock = juli_set_spdif_clock;
ice->spdif.ops.open = juli_spdif_in_open;
-
-#ifdef CONFIG_PM
- ice->pm_resume = juli_resume;
- ice->pm_suspend = juli_suspend;
- ice->pm_suspend_enabled = 1;
-#endif
-
return 0;
}
return 0; /* already large enough */
vfree(runtime->dma_area);
}
- runtime->dma_area = vmalloc_32_user(size);
+ runtime->dma_area = vmalloc_32(size);
if (! runtime->dma_area)
return -ENOMEM;
runtime->dma_bytes = size;
iface |= 0x3 << 8;
break;
case SND_SOC_DAIFMT_DSP_B:
- iface |= 0x3 << 8 | WM8350_AIF_LRCLK_INV;
+ iface |= 0x3 << 8; /* lg not sure which mode */
break;
default:
return -EINVAL;
/* filter coefficient */
switch (params_rate(params)) {
- case 8000:
+ case SNDRV_PCM_RATE_8000:
adn |= 0x5 << 1;
break;
- case 11025:
+ case SNDRV_PCM_RATE_11025:
adn |= 0x4 << 1;
break;
- case 16000:
+ case SNDRV_PCM_RATE_16000:
adn |= 0x3 << 1;
break;
- case 22050:
+ case SNDRV_PCM_RATE_22050:
adn |= 0x2 << 1;
break;
- case 32000:
+ case SNDRV_PCM_RATE_32000:
adn |= 0x1 << 1;
break;
- case 44100:
- case 48000:
+ case SNDRV_PCM_RATE_44100:
+ case SNDRV_PCM_RATE_48000:
break;
}
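A note on this and the similar rate switches below: params_rate() returns the negotiated sample rate in Hz (8000, 44100, ...), while the SNDRV_PCM_RATE_* names are single-bit mask values meant for the rates field of struct snd_pcm_hardware, so a switch on params_rate() can only ever match plain Hz labels. A minimal sketch using the filter values from the hunk above; demo_filter_bits() is illustrative, and params_rate() is assumed to come from the ASoC headers:

#include <sound/soc.h>

/* Pick the ADC filter setting from the negotiated rate.  The case
 * labels must be Hz values; SNDRV_PCM_RATE_8000 and friends are
 * bitflags and would never equal params_rate()'s return value. */
static u16 demo_filter_bits(struct snd_pcm_hw_params *params)
{
	switch (params_rate(params)) {
	case 8000:
		return 0x5 << 1;
	case 11025:
		return 0x4 << 1;
	case 16000:
		return 0x3 << 1;
	case 22050:
		return 0x2 << 1;
	case 32000:
		return 0x1 << 1;
	default:
		return 0;	/* 44100/48000: no extra filtering */
	}
}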
struct i2c_client *i2c = codec->control_data;
int i;
u16 *reg_cache = codec->reg_cache;
- u16 *tmp_cache = kmemdup(reg_cache, sizeof(wm8903_reg_defaults),
+ u16 *tmp_cache = kmemdup(codec->reg_cache, sizeof(wm8903_reg_defaults),
GFP_KERNEL);
/* Bring the codec back up to standby first to minimise pop/clicks */
for (i = 2; i < ARRAY_SIZE(wm8903_reg_defaults); i++)
if (tmp_cache[i] != reg_cache[i])
snd_soc_write(codec, i, tmp_cache[i]);
- kfree(tmp_cache);
} else {
dev_err(&i2c->dev, "Failed to allocate temporary cache\n");
}
iface |= (1 << 9);
switch (params_rate(params)) {
- case 8000:
+ case SNDRV_PCM_RATE_8000:
addcntrl |= (0x5 << 1);
break;
- case 11025:
+ case SNDRV_PCM_RATE_11025:
addcntrl |= (0x4 << 1);
break;
- case 16000:
+ case SNDRV_PCM_RATE_16000:
addcntrl |= (0x3 << 1);
break;
- case 22050:
+ case SNDRV_PCM_RATE_22050:
addcntrl |= (0x2 << 1);
break;
- case 32000:
+ case SNDRV_PCM_RATE_32000:
addcntrl |= (0x1 << 1);
break;
- case 44100:
- case 48000:
+ case SNDRV_PCM_RATE_44100:
+ case SNDRV_PCM_RATE_48000:
break;
}
ret = snd_soc_write(codec, WM8940_ADDCNTRL, addcntrl);
};
#define WM8974_POWER1_BIASEN 0x08
-#define WM8974_POWER1_BUFIOEN 0x04
+#define WM8974_POWER1_BUFIOEN 0x10
struct wm8974_priv {
struct snd_soc_codec codec;
/* filter coefficient */
switch (params_rate(params)) {
- case 8000:
+ case SNDRV_PCM_RATE_8000:
adn |= 0x5 << 1;
break;
- case 11025:
+ case SNDRV_PCM_RATE_11025:
adn |= 0x4 << 1;
break;
- case 16000:
+ case SNDRV_PCM_RATE_16000:
adn |= 0x3 << 1;
break;
- case 22050:
+ case SNDRV_PCM_RATE_22050:
adn |= 0x2 << 1;
break;
- case 32000:
+ case SNDRV_PCM_RATE_32000:
adn |= 0x1 << 1;
break;
- case 44100:
- case 48000:
+ case SNDRV_PCM_RATE_44100:
+ case SNDRV_PCM_RATE_48000:
break;
}
{
u16 *cache = codec->reg_cache;
- if (reg < 0x7c)
- soc_ac97_ops.write(codec->ac97, reg, val);
+ soc_ac97_ops.write(codec->ac97, reg, val);
reg = reg >> 1;
if (reg < (ARRAY_SIZE(wm9712_reg)))
cache[reg] = val;
return 0; /* already large enough */
vfree(runtime->dma_area);
}
- runtime->dma_area = vmalloc_user(size);
+ runtime->dma_area = vmalloc(size);
if (!runtime->dma_area)
return -ENOMEM;
runtime->dma_bytes = size;
struct snd_usb_stream *as = snd_pcm_substream_chip(substream);
struct snd_usb_substream *subs = &as->substream[direction];
- if (!as->chip->shutdown && subs->interface >= 0) {
+ if (subs->interface >= 0) {
usb_set_interface(subs->dev, subs->interface, 0);
subs->interface = -1;
}
static int
process_comm_event(event_t *event)
{
- pid_set_comm(event->comm.tid, event->comm.comm);
+ pid_set_comm(event->comm.pid, event->comm.comm);
return 0;
}
static int
static int
kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
{
- struct _irqfd *irqfd, *tmp;
+ struct _irqfd *irqfd;
struct file *file = NULL;
struct eventfd_ctx *eventfd = NULL;
int ret;
init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);
- spin_lock_irq(&kvm->irqfds.lock);
-
- ret = 0;
- list_for_each_entry(tmp, &kvm->irqfds.items, list) {
- if (irqfd->eventfd != tmp->eventfd)
- continue;
- /* This fd is used for another irq already. */
- ret = -EBUSY;
- spin_unlock_irq(&kvm->irqfds.lock);
- goto fail;
- }
-
events = file->f_op->poll(file, &irqfd->pt);
+ spin_lock_irq(&kvm->irqfds.lock);
list_add_tail(&irqfd->list, &kvm->irqfds.items);
spin_unlock_irq(&kvm->irqfds.lock);
int irq_source_id;
mutex_lock(&kvm->irq_lock);
- irq_source_id = find_first_zero_bit(bitmap, BITS_PER_LONG);
- if (irq_source_id >= BITS_PER_LONG) {
+ irq_source_id = find_first_zero_bit(bitmap,
+ sizeof(kvm->arch.irq_sources_bitmap));
+ if (irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
printk(KERN_WARNING "kvm: exhaust allocatable IRQ sources!\n");
- irq_source_id = -EFAULT;
- goto unlock;
+ return -EFAULT;
}
ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
set_bit(irq_source_id, bitmap);
-unlock:
mutex_unlock(&kvm->irq_lock);
return irq_source_id;
mutex_lock(&kvm->irq_lock);
if (irq_source_id < 0 ||
- irq_source_id >= BITS_PER_LONG) {
+ irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
- goto unlock;
+ return;
}
- clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
- if (!irqchip_in_kernel(kvm))
- goto unlock;
-
for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
clear_bit(irq_source_id, &kvm->arch.irq_states[i]);
-unlock:
+ clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
mutex_unlock(&kvm->irq_lock);
}
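Finally, the arithmetic behind the BITS_PER_LONG changes above: sizeof() counts bytes, so bounding a bit search with sizeof(bitmap) caps it at 8 on a 64-bit one-word bitmap instead of 64, and bit indices at or beyond that false limit are wrongly treated as exhausted. A small self-contained illustration in plain user-space C (not kernel code):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long bitmap = 0;	/* one-word bitmap, as in the hunk */

	/* sizeof() is in bytes; the number of addressable bits is
	 * sizeof() * CHAR_BIT, which is what BITS_PER_LONG expresses
	 * for an unsigned long. */
	printf("sizeof(bitmap) = %zu bytes\n", sizeof(bitmap));
	printf("usable bits    = %zu\n", sizeof(bitmap) * CHAR_BIT);
	return 0;
}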