unfreeze_fs: called when VFS is unlocking a filesystem and making it writable
again.
- statfs: called when the VFS needs to get filesystem statistics. This
- is called with the kernel lock held
+ statfs: called when the VFS needs to get filesystem statistics.
remount_fs: called when the filesystem is remounted. This is called
with the kernel lock held
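For orientation, here is a minimal sketch (not part of the patch) of how a filesystem might wire these methods into its struct super_operations; the myfs_* names, the magic value, and the empty bodies are hypothetical:

	#include <linux/fs.h>
	#include <linux/statfs.h>

	/* Hypothetical example; the myfs_* names are illustrative only. */
	static int myfs_statfs(struct dentry *dentry, struct kstatfs *buf)
	{
		buf->f_type  = 0x6d796673;	/* made-up magic number */
		buf->f_bsize = dentry->d_sb->s_blocksize;
		return 0;
	}

	static int myfs_remount_fs(struct super_block *sb, int *flags, char *data)
	{
		/* re-parse mount options here; runs with the kernel lock held */
		return 0;
	}

	static int myfs_unfreeze_fs(struct super_block *sb)
	{
		/* thaw: allow writes again after a freeze */
		return 0;
	}

	static const struct super_operations myfs_super_ops = {
		.statfs      = myfs_statfs,
		.remount_fs  = myfs_remount_fs,
		.unfreeze_fs = myfs_unfreeze_fs,
	};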
This sets up the device clock rate, SPI mode, and word sizes.
Drivers may change the defaults provided by board_info, and then
call spi_setup(spi) to invoke this routine. It may sleep.
+
Unless each SPI slave has its own configuration registers, don't
change them right away ... otherwise drivers could corrupt I/O
that's in progress for other SPI devices.
+ ** BUG ALERT: for some reason the first version of
+ ** many spi_master drivers seems to get this wrong.
+ ** When you code setup(), ASSUME that the controller
+ ** is actively processing transfers for another device.
+
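To make the sequence concrete, a minimal sketch of a protocol driver reconfiguring one slave and then calling spi_setup(); the mode, word size, and clock ceiling below are arbitrary example values:

	#include <linux/spi/spi.h>

	/* Sketch only: the chosen values are arbitrary examples. */
	static int example_configure(struct spi_device *spi)
	{
		spi->mode          = SPI_MODE_3;	/* CPOL=1, CPHA=1 */
		spi->bits_per_word = 16;
		spi->max_speed_hz  = 1000000;		/* 1 MHz ceiling */

		/* may sleep; must not corrupt I/O in progress for others */
		return spi_setup(spi);
	}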
master->transfer(struct spi_device *spi, struct spi_message *message)
This must not sleep. Its responsibility is to arrange that the
transfer happens and its complete() callback is issued. The two
EMBEDDED LINUX
P: Paul Gortmaker
M: paul.gortmaker@windriver.com
+P: Matt Mackall
+M: mpm@selenic.com
P: David Woodhouse
M: dwmw2@infradead.org
L: linux-embedded@vger.kernel.org
F: include/linux/suspend.h
F: include/linux/freezer.h
F: include/linux/pm.h
-F: include/asm-*/suspend*.h
F: arch/*/include/asm/suspend*.h
KMEMTRACE
M: eduard.munteanu@linux360.ro
L: linux-kernel@vger.kernel.org
S: Maintained
-F: Documentation/vm/kmemtrace.txt
+F: Documentation/trace/kmemtrace.txt
F: include/trace/kmemtrace.h
F: kernel/trace/kmemtrace.c
F: include/linux/suspend.h
F: include/linux/freezer.h
F: include/linux/pm.h
-F: include/asm-*/suspend.h
SVGA HANDLING
P: Martin Mares
S: Maintained
F: arch/m68knommu/
-UCLINUX FOR RENESAS H8/300
+UCLINUX FOR RENESAS H8/300 (H8300)
P: Yoshinori Sato
M: ysato@users.sourceforge.jp
W: http://uclinux-h8.sourceforge.jp/
int flag)
{
struct kstat stat;
- int error = -EINVAL;
+ int error;
- if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
- goto out;
-
- if (flag & AT_SYMLINK_NOFOLLOW)
- error = vfs_lstat_fd(dfd, filename, &stat);
- else
- error = vfs_stat_fd(dfd, filename, &stat);
-
- if (!error)
- error = cp_oldabi_stat64(&stat, statbuf);
-
-out:
- return error;
+ error = vfs_fstatat(dfd, filename, &stat, flag);
+ if (error)
+ return error;
+ return cp_oldabi_stat64(&stat, statbuf);
}
struct oabi_flock64 {
.handler = at91rm9200_timer_interrupt
};
-static cycle_t read_clk32k(void)
+static cycle_t read_clk32k(struct clocksource *cs)
{
return read_CRTR();
}
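This hunk is the first of many identical conversions below: each clocksource read callback now receives a pointer to the struct clocksource being read, so a shared handler can tell instances apart. A minimal sketch of the new callback shape, assuming a hypothetical memory-mapped counter:

	#include <linux/clocksource.h>
	#include <linux/io.h>

	static void __iomem *example_counter;	/* hypothetical MMIO counter */

	static cycle_t example_read(struct clocksource *cs)
	{
		/* cs identifies which clocksource is being read */
		return (cycle_t)readl(example_counter);
	}

	static struct clocksource example_clocksource = {
		.name   = "example",
		.rating = 200,
		.read   = example_read,		/* new prototype */
		.mask   = CLOCKSOURCE_MASK(32),
		.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
	};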
* Clocksource: just a monotonic counter of MCK/16 cycles.
* We don't care whether or not PIT irqs are enabled.
*/
-static cycle_t read_pit_clk(void)
+static cycle_t read_pit_clk(struct clocksource *cs)
{
unsigned long flags;
u32 elapsed;
/*
* clocksource
*/
-static cycle_t read_cycles(void)
+static cycle_t read_cycles(struct clocksource *cs)
{
struct timer_s *t = &timers[TID_CLOCKSOURCE];
IMX_TCTL(TIMER_BASE) = TCTL_FRR | TCTL_CLK_PCLK1 | TCTL_TEN;
}
-cycle_t imx_get_cycles(void)
+cycle_t imx_get_cycles(struct clocksource *cs)
{
return IMX_TCN(TIMER_BASE);
}
/*
* clocksource
*/
-cycle_t ixp4xx_get_cycles(void)
+cycle_t ixp4xx_get_cycles(struct clocksource *cs)
{
return *IXP4XX_OSTS;
}
return IRQ_HANDLED;
}
-static cycle_t msm_gpt_read(void)
+static cycle_t msm_gpt_read(struct clocksource *cs)
{
return readl(MSM_GPT_BASE + TIMER_COUNT_VAL);
}
-static cycle_t msm_dgt_read(void)
+static cycle_t msm_dgt_read(struct clocksource *cs)
{
return readl(MSM_DGT_BASE + TIMER_COUNT_VAL) >> MSM_DGT_SHIFT;
}
.handler = netx_timer_interrupt,
};
-cycle_t netx_get_cycles(void)
+cycle_t netx_get_cycles(struct clocksource *cs)
{
return readl(NETX_GPIO_COUNTER_CURRENT(TIMER_CLOCKSOURCE));
}
#define TIMER_CLOCKEVENT 1
static u32 latch;
-static cycle_t ns9360_clocksource_read(void)
+static cycle_t ns9360_clocksource_read(struct clocksource *cs)
{
return __raw_readl(SYS_TR(TIMER_CLOCKSOURCE));
}
.handler = omap_mpu_timer2_interrupt,
};
-static cycle_t mpu_read(void)
+static cycle_t mpu_read(struct clocksource *cs)
{
return ~omap_mpu_timer_read(1);
}
* clocksource
*/
static struct omap_dm_timer *gpt_clocksource;
-static cycle_t clocksource_read_cycles(void)
+static cycle_t clocksource_read_cycles(struct clocksource *cs)
{
return (cycle_t)omap_dm_timer_read_counter(gpt_clocksource);
}
.set_mode = pxa_osmr0_set_mode,
};
-static cycle_t pxa_read_oscr(void)
+static cycle_t pxa_read_oscr(struct clocksource *cs)
{
return OSCR;
}
.handler = realview_timer_interrupt,
};
-static cycle_t realview_get_cycles(void)
+static cycle_t realview_get_cycles(struct clocksource *cs)
{
return ~readl(timer3_va_base + TIMER_VALUE);
}
.handler = versatile_timer_interrupt,
};
-static cycle_t versatile_get_cycles(void)
+static cycle_t versatile_get_cycles(struct clocksource *cs)
{
return ~readl(TIMER3_VA_BASE + TIMER_VALUE);
}
/* clock source */
-static cycle_t mxc_get_cycles(void)
+static cycle_t mxc_get_cycles(struct clocksource *cs)
{
return __raw_readl(TIMER_BASE + MXC_TCN);
}
#include <linux/clocksource.h>
-static cycle_t omap_32k_read(void)
+static cycle_t omap_32k_read(struct clocksource *cs)
{
return omap_readl(TIMER_32K_SYNCHRONIZED);
}
{
unsigned long long ret;
- ret = (unsigned long long)omap_32k_read();
+ ret = (unsigned long long)omap_32k_read(&clocksource_32k);
ret = (ret * clocksource_32k.mult_orig) >> clocksource_32k.shift;
return ret;
}
/*
* Clocksource handling.
*/
-static cycle_t orion_clksrc_read(void)
+static cycle_t orion_clksrc_read(struct clocksource *cs)
{
return 0xffffffff - readl(TIMER0_VAL);
}
#include <mach/pm.h>
-static cycle_t read_cycle_count(void)
+static cycle_t read_cycle_count(struct clocksource *cs)
{
return (cycle_t)sysreg_read(COUNT);
}
return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}
-static cycle_t read_cycles(void)
+static cycle_t read_cycles(struct clocksource *cs)
{
return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
}
-unsigned long long sched_clock(void)
-{
- return cycles_2_ns(read_cycles());
-}
-
static struct clocksource clocksource_bfin = {
.name = "bfin_cycles",
.rating = 350,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
+unsigned long long sched_clock(void)
+{
+ return cycles_2_ns(read_cycles(&clocksource_bfin));
+}
+
static int __init bfin_clocksource_init(void)
{
set_cyc2ns_scale(get_cclk() / 1000);
struct pci_bus *__nongpreldata pci_root_bus;
struct pci_ops *__nongpreldata pci_root_ops;
+/*
+ * The accessible PCI window does not cover the entire CPU address space, but
+ * there are devices we want to access outside of that window, so we need to
+ * insert specific PCI bus resources instead of using the platform-level bus
+ * resources directly for the PCI root bus.
+ *
+ * These are configured and inserted by pcibios_init() and are attached to the
+ * root bus by pcibios_fixup_bus().
+ */
+static struct resource pci_ioport_resource = {
+ .name = "PCI IO",
+ .start = 0,
+ .end = IO_SPACE_LIMIT,
+ .flags = IORESOURCE_IO,
+};
+
+static struct resource pci_iomem_resource = {
+ .name = "PCI mem",
+ .start = 0,
+ .end = -1,
+ .flags = IORESOURCE_MEM,
+};
+
/*
* Functions for accessing PCI configuration space
*/
#if 0
printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number);
#endif
+
+ if (bus->number == 0) {
+ bus->resource[0] = &pci_ioport_resource;
+ bus->resource[1] = &pci_iomem_resource;
+ }
+
pci_read_bridge_bases(bus);
if (bus->number == 0) {
/* enable PCI arbitration */
__reg_MB86943_pci_arbiter = MB86943_PCIARB_EN;
- ioport_resource.start = (__reg_MB86943_sl_pci_io_base << 9) & 0xfffffc00;
- ioport_resource.end = (__reg_MB86943_sl_pci_io_range << 9) | 0x3ff;
- ioport_resource.end += ioport_resource.start;
+ pci_ioport_resource.start = (__reg_MB86943_sl_pci_io_base << 9) & 0xfffffc00;
+ pci_ioport_resource.end = (__reg_MB86943_sl_pci_io_range << 9) | 0x3ff;
+ pci_ioport_resource.end += pci_ioport_resource.start;
printk("PCI IO window: %08llx-%08llx\n",
- (unsigned long long) ioport_resource.start,
- (unsigned long long) ioport_resource.end);
+ (unsigned long long) pci_ioport_resource.start,
+ (unsigned long long) pci_ioport_resource.end);
- iomem_resource.start = (__reg_MB86943_sl_pci_mem_base << 9) & 0xfffffc00;
+ pci_iomem_resource.start = (__reg_MB86943_sl_pci_mem_base << 9) & 0xfffffc00;
+ pci_iomem_resource.end = (__reg_MB86943_sl_pci_mem_range << 9) | 0x3ff;
+ pci_iomem_resource.end += pci_iomem_resource.start;
- /* Reserve somewhere to write to flush posted writes. */
- iomem_resource.start += 0x400;
-
- iomem_resource.end = (__reg_MB86943_sl_pci_mem_range << 9) | 0x3ff;
- iomem_resource.end += iomem_resource.start;
+ /* Reserve somewhere to write to flush posted writes. This is used by
+ * __flush_PCI_writes() from asm/io.h to force the write FIFO in the
+ * CPU-PCI bridge to flush as this doesn't happen automatically when a
+ * read is performed on the MB93090 development kit motherboard.
+ */
+ pci_iomem_resource.start += 0x400;
printk("PCI MEM window: %08llx-%08llx\n",
- (unsigned long long) iomem_resource.start,
- (unsigned long long) iomem_resource.end);
+ (unsigned long long) pci_iomem_resource.start,
+ (unsigned long long) pci_iomem_resource.end);
printk("PCI DMA memory: %08lx-%08lx\n",
dma_coherent_mem_start, dma_coherent_mem_end);
+ if (insert_resource(&iomem_resource, &pci_iomem_resource) < 0)
+ panic("Unable to insert PCI IOMEM resource\n");
+ if (insert_resource(&ioport_resource, &pci_ioport_resource) < 0)
+ panic("Unable to insert PCI IOPORT resource\n");
+
if (!pci_probe)
return -ENXIO;
register unsigned long ia64_intri_res asm ("r8"); \
register unsigned long __reg asm ("r8") = (reg); \
\
- BUILD_BUG_ON(!__builtin_constant_p(reg)); \
asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
PARAVIRT_TYPE(GETREG) \
+ (reg)) \
register unsigned long ia64_clobber1 asm ("r8"); \
register unsigned long ia64_clobber2 asm ("r9"); \
\
- BUILD_BUG_ON(!__builtin_constant_p(reg)); \
asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
PARAVIRT_TYPE(SETREG) \
+ (reg)) \
static void __iomem *cyclone_mc;
-static cycle_t read_cyclone(void)
+static cycle_t read_cyclone(struct clocksource *cs)
{
return (cycle_t)readq((void __iomem *)cyclone_mc);
}
{
platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}
+EXPORT_SYMBOL_GPL(smp_send_reschedule);
/*
* Called with preemption disabled.
return;
}
+ smp_call_function_mask(mm->cpu_vm_mask,
+ (void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
+ local_irq_disable();
+ local_finish_flush_tlb_mm(mm);
+ local_irq_enable();
preempt_enable();
- /*
- * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
- * have been running in the address space. It's not clear that this is worth the
- * trouble though: to avoid races, we have to raise the IPI on the target CPU
- * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
- * rather trivial.
- */
- on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
}
void arch_send_call_function_single_ipi(int cpu)
#include "fsyscall_gtod_data.h"
-static cycle_t itc_get_cycles(void);
+static cycle_t itc_get_cycles(struct clocksource *cs);
struct fsyscall_gtod_data_t fsyscall_gtod_data = {
.lock = SEQLOCK_UNLOCKED,
}
}
-static cycle_t itc_get_cycles(void)
+static cycle_t itc_get_cycles(struct clocksource *cs)
{
u64 lcycle, now, ret;
extern unsigned long sn_rtc_cycles_per_second;
-static cycle_t read_sn2(void)
+static cycle_t read_sn2(struct clocksource *cs)
{
return (cycle_t)readq(RTC_COUNTER_ADDR);
}
/***************************************************************************/
-static cycle_t m68328_read_clk(void)
+static cycle_t m68328_read_clk(struct clocksource *cs)
{
unsigned long flags;
u32 cycles;
#define DMA_DTMR_CLK_DIV_16 (2 << 1)
#define DMA_DTMR_ENABLE (1 << 0)
-static cycle_t cf_dt_get_cycles(void)
+static cycle_t cf_dt_get_cycles(struct clocksource *cs)
{
return __raw_readl(DTCN0);
}
/***************************************************************************/
-static cycle_t pit_read_clk(void)
+static cycle_t pit_read_clk(struct clocksource *cs)
{
unsigned long flags;
u32 cycles;
/***************************************************************************/
-static cycle_t mcftmr_read_clk(void)
+static cycle_t mcftmr_read_clk(struct clocksource *cs)
{
unsigned long flags;
u32 cycles;
static struct txx9_tmr_reg __iomem *txx9_cs_tmrptr;
-static cycle_t txx9_cs_read(void)
+static cycle_t txx9_cs_read(struct clocksource *cs)
{
return __raw_readl(&txx9_cs_tmrptr->trr);
}
#include <asm/sibyte/sb1250.h>
-static cycle_t bcm1480_hpt_read(void)
+static cycle_t bcm1480_hpt_read(struct clocksource *cs)
{
return (cycle_t) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT));
}
#include <asm/dec/ioasic.h>
#include <asm/dec/ioasic_addrs.h>
-static cycle_t dec_ioasic_hpt_read(void)
+static cycle_t dec_ioasic_hpt_read(struct clocksource *cs)
{
return ioasic_read(IO_REG_FCTR);
}
while (!ds1287_timer_state())
;
- start = dec_ioasic_hpt_read();
+ start = dec_ioasic_hpt_read(&clocksource_dec);
while (i--)
while (!ds1287_timer_state())
;
- end = dec_ioasic_hpt_read();
+ end = dec_ioasic_hpt_read(&clocksource_dec);
freq = (end - start) * 10;
printk(KERN_INFO "I/O ASIC clock frequency %dHz\n", freq);
#include <asm/time.h>
-static cycle_t c0_hpt_read(void)
+static cycle_t c0_hpt_read(struct clocksource *cs)
{
return read_c0_count();
}
* The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over
* again.
*/
-static cycle_t sb1250_hpt_read(void)
+static cycle_t sb1250_hpt_read(struct clocksource *cs)
{
unsigned int count;
* to just read by itself. So use jiffies to emulate a free
* running counter:
*/
-static cycle_t pit_read(void)
+static cycle_t pit_read(struct clocksource *cs)
{
unsigned long flags;
int count;
static unsigned long cpj;
-static cycle_t hpt_read(void)
+static cycle_t hpt_read(struct clocksource *cs)
{
return read_c0_count2();
}
setup_irq(irq, &hub_rt_irqaction);
}
-static cycle_t hub_rt_read(void)
+static cycle_t hub_rt_read(struct clocksource *cs)
{
return REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT);
}
#include <linux/clockchips.h>
#include <linux/clocksource.h>
-static cycle_t rtc_read(void);
+static cycle_t rtc_read(struct clocksource *);
static struct clocksource clocksource_rtc = {
.name = "rtc",
.rating = 400,
.read = rtc_read,
};
-static cycle_t timebase_read(void);
+static cycle_t timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
.name = "timebase",
.rating = 400,
}
/* clocksource code */
-static cycle_t rtc_read(void)
+static cycle_t rtc_read(struct clocksource *cs)
{
return (cycle_t)get_rtc();
}
-static cycle_t timebase_read(void)
+static cycle_t timebase_read(struct clocksource *cs)
{
return (cycle_t)get_tb();
}
struct stat64_emu31 __user* statbuf, int flag)
{
struct kstat stat;
- int error = -EINVAL;
-
- if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
- goto out;
-
- if (flag & AT_SYMLINK_NOFOLLOW)
- error = vfs_lstat_fd(dfd, filename, &stat);
- else
- error = vfs_stat_fd(dfd, filename, &stat);
+ int error;
- if (!error)
- error = cp_stat64(statbuf, &stat);
-out:
- return error;
+ error = vfs_fstatat(dfd, filename, &stat, flag);
+ if (error)
+ return error;
+ return cp_stat64(statbuf, &stat);
}
/*
return ts.tv_sec;
}
-static cycle_t read_tod_clock(void)
+static cycle_t read_tod_clock(struct clocksource *cs)
{
return get_clock();
}
{
clk_always_enable("uram0"); /* URAM */
clk_always_enable("xymem0"); /* XYMEM */
- clk_always_enable("rtc0"); /* RTC */
clk_always_enable("veu0"); /* VEU */
clk_always_enable("vpu0"); /* VPU */
clk_always_enable("jpu0"); /* JPU */
static int __init sh7723_devices_setup(void)
{
clk_always_enable("meram0"); /* MERAM */
- clk_always_enable("rtc0"); /* RTC */
clk_always_enable("veu1"); /* VEU2H1 */
clk_always_enable("veu0"); /* VEU2H0 */
clk_always_enable("vpu0"); /* VPU */
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
{
+ /*
+ * The shift for mmap2 is constant, regardless of PAGE_SIZE
+ * setting.
+ */
+ if (pgoff & ((1 << (PAGE_SHIFT - 12)) - 1))
+ return -EINVAL;
+
+ pgoff >>= PAGE_SHIFT - 12;
+
return do_mmap2(addr, len, prot, flags, fd, pgoff);
}
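For example, with 16 KiB pages (PAGE_SHIFT = 14), userspace still passes pgoff in 4 KiB units: the mask rejects offsets whose low two bits are set, and the shift divides pgoff by 1 << (14 - 12) = 4 to convert it to page units. With 4 KiB pages both the mask and the shift are zero, so the call passes through unchanged.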
if (!clocksource_sh.rating)
return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
- cycles = clocksource_sh.read();
+ cycles = clocksource_sh.read(&clocksource_sh);
return cyc2ns(&clocksource_sh, cycles);
}
#endif
*/
static int tmus_are_scaled;
-static cycle_t tmu_timer_read(void)
+static cycle_t tmu_timer_read(struct clocksource *cs)
{
return ((cycle_t)(~_tmu_read(TMU1)))<<tmus_are_scaled;
}
struct compat_stat64 __user * statbuf, int flag)
{
struct kstat stat;
- int error = -EINVAL;
-
- if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
- goto out;
-
- if (flag & AT_SYMLINK_NOFOLLOW)
- error = vfs_lstat_fd(dfd, filename, &stat);
- else
- error = vfs_stat_fd(dfd, filename, &stat);
-
- if (!error)
- error = cp_compat_stat64(&stat, statbuf);
+ int error;
-out:
- return error;
+ error = vfs_fstatat(dfd, filename, &stat, flag);
+ if (error)
+ return error;
+ return cp_compat_stat64(&stat, statbuf);
}
asmlinkage long compat_sys_sysfs(int option, u32 arg1, u32 arg2)
}
EXPORT_SYMBOL(udelay);
+static cycle_t clocksource_tick_read(struct clocksource *cs)
+{
+ return tick_ops->get_tick();
+}
+
void __init time_init(void)
{
unsigned long freq = sparc64_init_timers();
clocksource_tick.mult =
clocksource_hz2mult(freq,
clocksource_tick.shift);
- clocksource_tick.read = tick_ops->get_tick;
+ clocksource_tick.read = clocksource_tick_read;
printk("clocksource: mult[%x] shift[%d]\n",
clocksource_tick.mult, clocksource_tick.shift);
#This is just to shut up some Kconfig warnings, so no prompt.
config INPUT
- bool
+ tristate
default n
source "arch/um/Kconfig.debug"
return IRQ_HANDLED;
}
-static cycle_t itimer_read(void)
+static cycle_t itimer_read(struct clocksource *cs)
{
return os_nsecs() / 1000;
}
struct stat64 __user *statbuf, int flag)
{
struct kstat stat;
- int error = -EINVAL;
+ int error;
- if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
- goto out;
-
- if (flag & AT_SYMLINK_NOFOLLOW)
- error = vfs_lstat_fd(dfd, filename, &stat);
- else
- error = vfs_stat_fd(dfd, filename, &stat);
-
- if (!error)
- error = cp_stat64(statbuf, &stat);
-
-out:
- return error;
+ error = vfs_fstatat(dfd, filename, &stat, flag);
+ if (error)
+ return error;
+ return cp_stat64(statbuf, &stat);
}
/*
/*
* Clock source related code
*/
-static cycle_t read_hpet(void)
+static cycle_t read_hpet(struct clocksource *cs)
{
return (cycle_t)hpet_readl(HPET_COUNTER);
}
hpet_restart_counter();
/* Verify whether hpet counter works */
- t1 = read_hpet();
+ t1 = hpet_readl(HPET_COUNTER);
rdtscll(start);
/*
rdtscll(now);
} while ((now - start) < 200000UL);
- if (t1 == read_hpet()) {
+ if (t1 == hpet_readl(HPET_COUNTER)) {
printk(KERN_WARNING
"HPET counter not counting. HPET disabled\n");
return -ENODEV;
* to just read by itself. So use jiffies to emulate a free
* running counter:
*/
-static cycle_t pit_read(void)
+static cycle_t pit_read(struct clocksource *cs)
{
static int old_count;
static u32 old_jifs;
return ret;
}
+static cycle_t kvm_clock_get_cycles(struct clocksource *cs)
+{
+ return kvm_clock_read();
+}
+
/*
* If we don't do that, there is the possibility that the guest
* will calibrate under heavy load - thus, getting a lower lpj -
static struct clocksource kvm_clock = {
.name = "kvm-clock",
- .read = kvm_clock_read,
+ .read = kvm_clock_get_cycles,
.rating = 400,
.mask = CLOCKSOURCE_MASK(64),
.mult = 1 << KVM_SCALE,
* code, which is necessary to support wrapping clocksources like pm
* timer.
*/
-static cycle_t read_tsc(void)
+static cycle_t read_tsc(struct clocksource *cs)
{
cycle_t ret = (cycle_t)get_cycles();
/** vmi clocksource */
static struct clocksource clocksource_vmi;
-static cycle_t read_real_cycles(void)
+static cycle_t read_real_cycles(struct clocksource *cs)
{
cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
return max(ret, clocksource_vmi.cycle_last);
/* If we can't use the TSC, the kernel falls back to our lower-priority
* "lguest_clock", where we read the time value given to us by the Host. */
-static cycle_t lguest_clock_read(void)
+static cycle_t lguest_clock_read(struct clocksource *cs)
{
unsigned long sec, nsec;
return ret;
}
+static cycle_t xen_clocksource_get_cycles(struct clocksource *cs)
+{
+ return xen_clocksource_read();
+}
+
static void xen_read_wallclock(struct timespec *ts)
{
struct shared_info *s = HYPERVISOR_shared_info;
static struct clocksource xen_clocksource __read_mostly = {
.name = "xen",
.rating = 400,
- .read = xen_clocksource_read,
+ .read = xen_clocksource_get_cycles,
.mask = ~0,
.mult = 1<<XEN_SHIFT, /* time directly in nanoseconds */
.shift = XEN_SHIFT,
#ifdef CONFIG_IA64
static void __iomem *hpet_mctr;
-static cycle_t read_hpet(void)
+static cycle_t read_hpet(struct clocksource *cs)
{
return (cycle_t)read_counter((void __iomem *)hpet_mctr);
}
/* Events that were received with the proper format. */
IPMI_STAT_events,
+ /* Retransmissions on IPMB that failed. */
+ IPMI_STAT_dropped_rexmit_ipmb_commands,
+
+ /* Retransmissions on LAN that failed. */
+ IPMI_STAT_dropped_rexmit_lan_commands,
/* This *must* remain last, add new values above this. */
IPMI_NUM_STATS
#define ipmi_get_stat(intf, stat) \
((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
+static int is_lan_addr(struct ipmi_addr *addr)
+{
+ return addr->addr_type == IPMI_LAN_ADDR_TYPE;
+}
+
+static int is_ipmb_addr(struct ipmi_addr *addr)
+{
+ return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
+}
+
+static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
+{
+ return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
+}
static void free_recv_msg_list(struct list_head *q)
{
return (smi_addr1->lun == smi_addr2->lun);
}
- if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
- || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) {
+ if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
struct ipmi_ipmb_addr *ipmb_addr1
= (struct ipmi_ipmb_addr *) addr1;
struct ipmi_ipmb_addr *ipmb_addr2
&& (ipmb_addr1->lun == ipmb_addr2->lun));
}
- if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
+ if (is_lan_addr(addr1)) {
struct ipmi_lan_addr *lan_addr1
= (struct ipmi_lan_addr *) addr1;
struct ipmi_lan_addr *lan_addr2
|| (addr->channel < 0))
return -EINVAL;
- if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
- || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) {
+ if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
if (len < sizeof(struct ipmi_ipmb_addr))
return -EINVAL;
return 0;
}
- if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
+ if (is_lan_addr(addr)) {
if (len < sizeof(struct ipmi_lan_addr))
return -EINVAL;
return 0;
memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
smi_msg->data_size = msg->data_len + 2;
ipmi_inc_stat(intf, sent_local_commands);
- } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
- || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) {
+ } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
struct ipmi_ipmb_addr *ipmb_addr;
unsigned char ipmb_seq;
long seqid;
spin_lock_irqsave(&(intf->seq_lock), flags);
- ipmi_inc_stat(intf, sent_ipmb_commands);
-
/*
* Create a sequence number with a 1 second
* timeout and 4 retries.
goto out_err;
}
+ ipmi_inc_stat(intf, sent_ipmb_commands);
+
/*
* Store the sequence number in the message,
* so that when the send message response
*/
spin_unlock_irqrestore(&(intf->seq_lock), flags);
}
- } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
+ } else if (is_lan_addr(addr)) {
struct ipmi_lan_addr *lan_addr;
unsigned char ipmb_seq;
long seqid;
spin_lock_irqsave(&(intf->seq_lock), flags);
- ipmi_inc_stat(intf, sent_lan_commands);
-
/*
* Create a sequence number with a 1 second
* timeout and 4 retries.
goto out_err;
}
+ ipmi_inc_stat(intf, sent_lan_commands);
+
/*
* Store the sequence number in the message,
* so that when the send message response
ipmi_get_stat(intf, invalid_events));
out += sprintf(out, "events: %u\n",
ipmi_get_stat(intf, events));
+ out += sprintf(out, "failed rexmit LAN msgs: %u\n",
+ ipmi_get_stat(intf, dropped_rexmit_lan_commands));
+ out += sprintf(out, "failed rexmit IPMB msgs: %u\n",
+ ipmi_get_stat(intf, dropped_rexmit_ipmb_commands));
return (out - ((char *) page));
}
return rv;
}
+/*
+ * This routine will handle "Get Message" command responses with
+ * channels that use an OEM Medium. The message format belongs to
+ * the OEM. See IPMI 2.0 specification, Chapter 6 and
+ * Chapter 22, sections 22.6 and 22.24 for more details.
+ */
+static int handle_oem_get_msg_cmd(ipmi_smi_t intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct cmd_rcvr *rcvr;
+ int rv = 0;
+ unsigned char netfn;
+ unsigned char cmd;
+ unsigned char chan;
+ ipmi_user_t user = NULL;
+ struct ipmi_system_interface_addr *smi_addr;
+ struct ipmi_recv_msg *recv_msg;
+
+ /*
+ * We expect the OEM SW to perform error checking,
+ * so we just do some basic sanity checks
+ */
+ if (msg->rsp_size < 4) {
+ /* Message not big enough, just ignore it. */
+ ipmi_inc_stat(intf, invalid_commands);
+ return 0;
+ }
+
+ if (msg->rsp[2] != 0) {
+ /* An error getting the response, just ignore it. */
+ return 0;
+ }
+
+ /*
+ * This is an OEM Message so the OEM needs to know how
+ * to handle the message. We do no interpretation.
+ */
+ netfn = msg->rsp[0] >> 2;
+ cmd = msg->rsp[1];
+ chan = msg->rsp[3] & 0xf;
+
+ rcu_read_lock();
+ rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+ if (rcvr) {
+ user = rcvr->user;
+ kref_get(&user->refcount);
+ } else
+ user = NULL;
+ rcu_read_unlock();
+
+ if (user == NULL) {
+ /* We didn't find a user, just give up. */
+ ipmi_inc_stat(intf, unhandled_commands);
+
+ /*
+ * Don't do anything with these messages, just allow
+ * them to be freed.
+ */
+
+ rv = 0;
+ } else {
+ /* Deliver the message to the user. */
+ ipmi_inc_stat(intf, handled_commands);
+
+ recv_msg = ipmi_alloc_recv_msg();
+ if (!recv_msg) {
+ /*
+ * We couldn't allocate memory for the
+ * message, so requeue it for handling
+ * later.
+ */
+ rv = 1;
+ kref_put(&user->refcount, free_user);
+ } else {
+ /*
+ * OEM Messages are expected to be delivered via
+ * the system interface to SMS software. We might
+ * need to revisit this depending on OEM
+ * requirements.
+ */
+ smi_addr = ((struct ipmi_system_interface_addr *)
+ &(recv_msg->addr));
+ smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr->channel = IPMI_BMC_CHANNEL;
+ smi_addr->lun = msg->rsp[0] & 3;
+
+ recv_msg->user = user;
+ recv_msg->user_msg_data = NULL;
+ recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[1];
+ recv_msg->msg.data = recv_msg->msg_data;
+
+ /*
+ * The message starts at byte 4 which follows the
+ * Channel Byte in the "GET MESSAGE" command
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ memcpy(recv_msg->msg_data,
+ &(msg->rsp[4]),
+ msg->rsp_size - 4);
+ deliver_response(recv_msg);
+ }
+ }
+
+ return rv;
+}
+
static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
struct ipmi_smi_msg *msg)
{
goto out;
}
+ /*
+ * We need to make sure the channels have been initialized.
+ * The channel_handler routine will set the "curr_channel"
+ * equal to or greater than IPMI_MAX_CHANNELS when all the
+ * channels for this interface have been initialized.
+ */
+ if (intf->curr_channel < IPMI_MAX_CHANNELS) {
+ requeue = 1; /* Just put the message back for now */
+ goto out;
+ }
+
switch (intf->channels[chan].medium) {
case IPMI_CHANNEL_MEDIUM_IPMB:
if (msg->rsp[4] & 0x04) {
break;
default:
- /*
- * We don't handle the channel type, so just
- * free the message.
- */
- requeue = 0;
+ /*
+ * Check for OEM Channels. Clients had better
+ * register for these commands.
+ */
+ if ((intf->channels[chan].medium
+ >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
+ && (intf->channels[chan].medium
+ <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
+ requeue = handle_oem_get_msg_cmd(intf, msg);
+ } else {
+ /*
+ * We don't handle the channel type, so just
+ * free the message.
+ */
+ requeue = 0;
+ }
}
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
list_add_tail(&msg->link, timeouts);
if (ent->broadcast)
ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
- else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
+ else if (is_lan_addr(&ent->recv_msg->addr))
ipmi_inc_stat(intf, timed_out_lan_commands);
else
ipmi_inc_stat(intf, timed_out_ipmb_commands);
*/
ent->timeout = MAX_MSG_TIMEOUT;
ent->retries_left--;
- if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
- ipmi_inc_stat(intf, retransmitted_lan_commands);
- else
- ipmi_inc_stat(intf, retransmitted_ipmb_commands);
-
smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
ent->seqid);
- if (!smi_msg)
+ if (!smi_msg) {
+ if (is_lan_addr(&ent->recv_msg->addr))
+ ipmi_inc_stat(intf,
+ dropped_rexmit_lan_commands);
+ else
+ ipmi_inc_stat(intf,
+ dropped_rexmit_ipmb_commands);
return;
+ }
spin_unlock_irqrestore(&intf->seq_lock, *flags);
* resent.
*/
handlers = intf->handlers;
- if (handlers)
+ if (handlers) {
+ if (is_lan_addr(&ent->recv_msg->addr))
+ ipmi_inc_stat(intf,
+ retransmitted_lan_commands);
+ else
+ ipmi_inc_stat(intf,
+ retransmitted_ipmb_commands);
+
intf->handlers->sender(intf->send_info,
smi_msg, 0);
- else
+ } else
ipmi_free_smi_msg(smi_msg);
spin_lock_irqsave(&intf->seq_lock, *flags);
#define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM requests a
short timeout */
-/* Bit for BMC global enables. */
-#define IPMI_BMC_RCV_MSG_INTR 0x01
-#define IPMI_BMC_EVT_MSG_INTR 0x02
-#define IPMI_BMC_EVT_MSG_BUFF 0x04
-#define IPMI_BMC_SYS_LOG 0x08
-
enum si_intf_state {
SI_NORMAL,
SI_GETTING_FLAGS,
OEM2_DATA_AVAIL)
unsigned char msg_flags;
+ /* Does the BMC have an event buffer? */
+ char has_event_buffer;
+
/*
* If set to true, this will request events the next time the
* state machine is idle.
{
struct smi_info *smi_info = send_info;
- if (atomic_read(&smi_info->stop_operation))
+ if (atomic_read(&smi_info->stop_operation) ||
+ !smi_info->has_event_buffer)
return;
atomic_set(&smi_info->req_events, 1);
};
#endif /* CONFIG_PPC_OF */
-
-static int try_get_dev_id(struct smi_info *smi_info)
+static int wait_for_msg_done(struct smi_info *smi_info)
{
- unsigned char msg[2];
- unsigned char *resp;
- unsigned long resp_len;
enum si_sm_result smi_result;
- int rv = 0;
-
- resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
- if (!resp)
- return -ENOMEM;
-
- /*
- * Do a Get Device ID command, since it comes back with some
- * useful info.
- */
- msg[0] = IPMI_NETFN_APP_REQUEST << 2;
- msg[1] = IPMI_GET_DEVICE_ID_CMD;
- smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
for (;;) {
} else
break;
}
- if (smi_result == SI_SM_HOSED) {
+ if (smi_result == SI_SM_HOSED)
/*
* We couldn't get the state machine to run, so whatever's at
* the port is probably not an IPMI SMI interface.
*/
- rv = -ENODEV;
+ return -ENODEV;
+
+ return 0;
+}
+
+static int try_get_dev_id(struct smi_info *smi_info)
+{
+ unsigned char msg[2];
+ unsigned char *resp;
+ unsigned long resp_len;
+ int rv = 0;
+
+ resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ /*
+ * Do a Get Device ID command, since it comes back with some
+ * useful info.
+ */
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_DEVICE_ID_CMD;
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+
+ rv = wait_for_msg_done(smi_info);
+ if (rv)
goto out;
- }
- /* Otherwise, we got some data. */
resp_len = smi_info->handlers->get_result(smi_info->si_sm,
resp, IPMI_MAX_MSG_LENGTH);
return rv;
}
+static int try_enable_event_buffer(struct smi_info *smi_info)
+{
+ unsigned char msg[3];
+ unsigned char *resp;
+ unsigned long resp_len;
+ int rv = 0;
+
+ resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+
+ rv = wait_for_msg_done(smi_info);
+ if (rv) {
+ printk(KERN_WARNING
+ "ipmi_si: Error getting response from get global,"
+ " enables command, the event buffer is not"
+ " enabled.\n");
+ goto out;
+ }
+
+ resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+ resp, IPMI_MAX_MSG_LENGTH);
+
+ if (resp_len < 4 ||
+ resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
+ resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
+ resp[2] != 0) {
+ printk(KERN_WARNING
+ "ipmi_si: Invalid return from get global"
+ " enables command, cannot enable the event"
+ " buffer.\n");
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if (resp[3] & IPMI_BMC_EVT_MSG_BUFF)
+ /* buffer is already enabled, nothing to do. */
+ goto out;
+
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+ msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+
+ rv = wait_for_msg_done(smi_info);
+ if (rv) {
+ printk(KERN_WARNING
+ "ipmi_si: Error getting response from set global,"
+ " enables command, the event buffer is not"
+ " enabled.\n");
+ goto out;
+ }
+
+ resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+ resp, IPMI_MAX_MSG_LENGTH);
+
+ if (resp_len < 3 ||
+ resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
+ resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
+ printk(KERN_WARNING
+ "ipmi_si: Invalid return from get global,"
+ "enables command, not enable the event"
+ " buffer.\n");
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if (resp[2] != 0)
+ /*
+ * An error when setting the event buffer bit means
+ * that the event buffer is not supported.
+ */
+ rv = -ENOENT;
+ out:
+ kfree(resp);
+ return rv;
+}
+
static int type_file_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
new_smi->intf_num = smi_num;
smi_num++;
+ rv = try_enable_event_buffer(new_smi);
+ if (rv == 0)
+ new_smi->has_event_buffer = 1;
+
/*
* Start clearing the flags before we enable interrupts or the
* timer to avoid racing with the timer.
*/
new_smi->pdev = platform_device_alloc("ipmi_si",
new_smi->intf_num);
- if (rv) {
+ if (!new_smi->pdev) {
printk(KERN_ERR
"ipmi_si_intf:"
" Unable to allocate platform device\n");
return v2;
}
-static cycle_t acpi_pm_read(void)
+static cycle_t acpi_pm_read(struct clocksource *cs)
{
return (cycle_t)read_pmtmr();
}
}
__setup("acpi_pm_good", acpi_pm_good_setup);
-static cycle_t acpi_pm_read_slow(void)
+static cycle_t acpi_pm_read_slow(struct clocksource *cs)
{
return (cycle_t)acpi_pm_read_verified();
}
unsigned long count, delta;
mach_prepare_counter();
- value1 = clocksource_acpi_pm.read();
+ value1 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
mach_countup(&count);
- value2 = clocksource_acpi_pm.read();
+ value2 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
delta = (value2 - value1) & ACPI_PM_MASK;
/* Check that the PMTMR delta is within 5% of what we expect */
/* "verify" this timing source: */
for (j = 0; j < ACPI_PM_MONOTONICITY_CHECKS; j++) {
udelay(100 * j);
- value1 = clocksource_acpi_pm.read();
+ value1 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
for (i = 0; i < ACPI_PM_READ_CHECKS; i++) {
- value2 = clocksource_acpi_pm.read();
+ value2 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
if (value2 == value1)
continue;
if (value2 > value1)
int use_cyclone = 0;
static void __iomem *cyclone_ptr;
-static cycle_t read_cyclone(void)
+static cycle_t read_cyclone(struct clocksource *cs)
{
return (cycle_t)readl(cyclone_ptr);
}
/* The base timer frequency, * 27 if selected */
#define HRT_FREQ 1000000
-static cycle_t read_hrt(void)
+static cycle_t read_hrt(struct clocksource *cs)
{
/* Read the timer value */
return (cycle_t) inl(scx200_cb_base + SCx200_TIMER_OFFSET);
static void __iomem *tcaddr;
-static cycle_t tc_get_cycles(void)
+static cycle_t tc_get_cycles(struct clocksource *cs)
{
unsigned long flags;
u32 lower, upper;
int row_index;
err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
- if (err_detect)
+ if (!err_detect)
return;
mpc85xx_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
MODULE_DEVICE_TABLE(parisc, hp_sdc_tbl);
static int __init hp_sdc_init_hppa(struct parisc_device *d);
+static struct delayed_work moduleloader_work;
static struct parisc_driver hp_sdc_driver = {
.name = "hp_sdc",
#if defined(__hppa__)
+static void request_module_delayed(struct work_struct *work)
+{
+ request_module("hp_sdc_mlc");
+}
+
static int __init hp_sdc_init_hppa(struct parisc_device *d)
{
+ int ret;
+
if (!d)
return 1;
if (hp_sdc.dev != NULL)
hp_sdc.data_io = d->hpa.start + 0x800;
hp_sdc.status_io = d->hpa.start + 0x801;
- return hp_sdc_init();
+ INIT_DELAYED_WORK(&moduleloader_work, request_module_delayed);
+
+ ret = hp_sdc_init();
+ /* after successful initialization give SDC some time to settle
+ * and then load the hp_sdc_mlc upper layer driver */
+ if (!ret)
+ schedule_delayed_work(&moduleloader_work,
+ msecs_to_jiffies(2000));
+
+ return ret;
}
#endif /* __hppa__ */
static void hp_sdc_exit(void)
{
+ /* do nothing if we don't have a SDC */
+ if (!hp_sdc.dev)
+ return;
+
write_lock_irq(&hp_sdc.lock);
/* Turn off all maskable "sub-function" irq's. */
tasklet_kill(&hp_sdc.task);
#if defined(__hppa__)
+ cancel_delayed_work_sync(&moduleloader_work);
if (unregister_parisc_driver(&hp_sdc_driver))
printk(KERN_WARNING PREFIX "Error unregistering HP SDC");
#endif
/* Initialize the timer
*/
- init_timer(&pCfg->timer);
+ init_timer_on_stack(&pCfg->timer);
pCfg->timer.data = (unsigned long) ioc;
pCfg->timer.function = mpt_timer_expired;
pCfg->wait_done = 0;
void *gru_start_vaddr;
if (!is_uv_system())
- return -ENODEV;
+ return 0;
#if defined CONFIG_IA64
gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ fixme */
enum xp_retval ret;
int ch_number;
+ /* initialize the connection registration mutex */
+ for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++)
+ mutex_init(&xpc_registrations[ch_number].mutex);
+
if (is_shub())
ret = xp_init_sn2();
else if (is_uv())
ret = xp_init_uv();
else
- ret = xpUnsupported;
+ ret = 0;
if (ret != xpSuccess)
- return -ENODEV;
-
- /* initialize the connection registration mutex */
- for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++)
- mutex_init(&xpc_registrations[ch_number].mutex);
+ return ret;
return 0;
}
config RTC_DRV_SH
tristate "SuperH On-Chip RTC"
- depends on RTC_CLASS && SUPERH
+ depends on RTC_CLASS && SUPERH && HAVE_CLK
help
Say Y here to enable support for the on-chip RTC found in
most SuperH processors.
goto cleanup2;
}
- pr_info("%s: alarms up to one %s%s, %zd bytes nvram%s\n",
- dev_name(&cmos_rtc.rtc->dev),
- is_valid_irq(rtc_irq)
- ? (cmos_rtc.mon_alrm
- ? "year"
- : (cmos_rtc.day_alrm
- ? "month" : "day"))
- : "no",
- cmos_rtc.century ? ", y3k" : "",
- nvram.size,
- is_hpet_enabled() ? ", hpet irqs" : "");
+ pr_info("%s: %s%s, %zd bytes nvram%s\n",
+ dev_name(&cmos_rtc.rtc->dev),
+ !is_valid_irq(rtc_irq) ? "no alarms" :
+ cmos_rtc.mon_alrm ? "alarms up to one year" :
+ cmos_rtc.day_alrm ? "alarms up to one month" :
+ "alarms up to one day",
+ cmos_rtc.century ? ", y3k" : "",
+ nvram.size,
+ is_hpet_enabled() ? ", hpet irqs" : "");
return 0;
/*
* SuperH On-Chip RTC Support
*
- * Copyright (C) 2006, 2007, 2008 Paul Mundt
+ * Copyright (C) 2006 - 2009 Paul Mundt
* Copyright (C) 2006 Jamie Lenehan
* Copyright (C) 2008 Angelo Castello
*
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/log2.h>
+#include <linux/clk.h>
#include <asm/rtc.h>
#define DRV_NAME "sh-rtc"
-#define DRV_VERSION "0.2.1"
+#define DRV_VERSION "0.2.2"
#define RTC_REG(r) ((r) * rtc_reg_size)
#define RCR2_START 0x01 /* Start bit */
struct sh_rtc {
- void __iomem *regbase;
- unsigned long regsize;
- struct resource *res;
- int alarm_irq;
- int periodic_irq;
- int carry_irq;
- struct rtc_device *rtc_dev;
- spinlock_t lock;
- unsigned long capabilities; /* See asm-sh/rtc.h for cap bits */
- unsigned short periodic_freq;
+ void __iomem *regbase;
+ unsigned long regsize;
+ struct resource *res;
+ int alarm_irq;
+ int periodic_irq;
+ int carry_irq;
+ struct clk *clk;
+ struct rtc_device *rtc_dev;
+ spinlock_t lock;
+ unsigned long capabilities; /* See asm/rtc.h for cap bits */
+ unsigned short periodic_freq;
};
static int __sh_rtc_interrupt(struct sh_rtc *rtc)
tmp = readb(rtc->regbase + RCR1);
- if (!enable)
- tmp &= ~RCR1_AIE;
- else
+ if (enable)
tmp |= RCR1_AIE;
+ else
+ tmp &= ~RCR1_AIE;
writeb(tmp, rtc->regbase + RCR1);
{
if (!is_power_of_2(freq))
return -EINVAL;
+
return sh_rtc_ioctl(dev, RTC_IRQP_SET, freq);
}
struct sh_rtc *rtc;
struct resource *res;
struct rtc_time r;
- int ret;
+ char clk_name[6];
+ int clk_id, ret;
rtc = kzalloc(sizeof(struct sh_rtc), GFP_KERNEL);
if (unlikely(!rtc))
dev_err(&pdev->dev, "No IRQ resource\n");
goto err_badres;
}
+
rtc->periodic_irq = ret;
rtc->carry_irq = platform_get_irq(pdev, 1);
rtc->alarm_irq = platform_get_irq(pdev, 2);
goto err_badres;
}
- rtc->regsize = res->end - res->start + 1;
+ rtc->regsize = resource_size(res);
rtc->res = request_mem_region(res->start, rtc->regsize, pdev->name);
if (unlikely(!rtc->res)) {
goto err_badmap;
}
+ clk_id = pdev->id;
+ /* With a single device, the clock id is still "rtc0" */
+ if (clk_id < 0)
+ clk_id = 0;
+
+ snprintf(clk_name, sizeof(clk_name), "rtc%d", clk_id);
+
+ rtc->clk = clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(rtc->clk)) {
+ /*
+ * No error handling for rtc->clk intentionally; not all
+ * platforms will have a unique clock for the RTC, and
+ * the clk API can handle the struct clk pointer being
+ * NULL.
+ */
+ rtc->clk = NULL;
+ }
+
+ clk_enable(rtc->clk);
+
rtc->rtc_dev = rtc_device_register("sh", &pdev->dev,
&sh_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc->rtc_dev)) {
return 0;
err_unmap:
+ clk_disable(rtc->clk);
+ clk_put(rtc->clk);
iounmap(rtc->regbase);
err_badmap:
release_resource(rtc->res);
sh_rtc_setcie(&pdev->dev, 0);
free_irq(rtc->periodic_irq, rtc);
+
if (rtc->carry_irq > 0) {
free_irq(rtc->carry_irq, rtc);
free_irq(rtc->alarm_irq, rtc);
iounmap(rtc->regbase);
+ clk_disable(rtc->clk);
+ clk_put(rtc->clk);
+
platform_set_drvdata(pdev, NULL);
kfree(rtc);
struct sh_rtc *rtc = platform_get_drvdata(pdev);
set_irq_wake(rtc->periodic_irq, enabled);
+
if (rtc->carry_irq > 0) {
set_irq_wake(rtc->carry_irq, enabled);
set_irq_wake(rtc->alarm_irq, enabled);
}
-
}
static int sh_rtc_suspend(struct device *dev)
struct tty_struct *tty = uart->port.info->port.tty;
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (uart->scts && (!bfin_serial_get_mctrl(&uart->port)&TIOCM_CTS)) {
+ if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) {
uart->scts = 0;
uart_handle_cts_change(&uart->port, uart->scts);
}
struct bfin_serial_port *uart = dev_id;
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (uart->scts && (!bfin_serial_get_mctrl(&uart->port)&TIOCM_CTS)) {
+ if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) {
uart->scts = 0;
uart_handle_cts_change(&uart->port, uart->scts);
}
struct circ_buf *xmit = &uart->port.info->xmit;
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (uart->scts && (!bfin_serial_get_mctrl(&uart->port)&TIOCM_CTS)) {
+ if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) {
uart->scts = 0;
uart_handle_cts_change(&uart->port, uart->scts);
}
while (read_SSSR(reg) & SSSR_RNE) {
read_SSDR(reg);
}
- } while ((read_SSSR(reg) & SSSR_BSY) && limit--);
+ } while ((read_SSSR(reg) & SSSR_BSY) && --limit);
write_SSSR(SSSR_ROR, reg);
return limit;
{
unsigned long limit = loops_per_jiffy << 1;
- while ((read_SSSR(ioaddr) & SSSR_BSY) && limit--)
+ while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit)
cpu_relax();
return limit;
{
unsigned long limit = loops_per_jiffy << 1;
- while (!(DCSR(channel) & DCSR_STOPSTATE) && limit--)
+ while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit)
cpu_relax();
return limit;
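The pre-decrement fixes above matter because these helpers return limit and callers treat zero as a timeout: with the post-decrement form, the final limit-- that terminates the loop leaves limit at (unsigned long)-1, so a timed-out wait looks successful. A minimal sketch of the corrected idiom; busy_condition() is a hypothetical stand-in for the SSSR/DCSR status polls:

	#include <linux/delay.h>	/* loops_per_jiffy, cpu_relax() */

	static int busy_condition(void);	/* hypothetical status poll */

	static unsigned long wait_until_idle(void)
	{
		unsigned long limit = loops_per_jiffy << 1;

		/* with --limit, the loop exits with limit == 0 on timeout */
		while (busy_condition() && --limit)
			cpu_relax();

		return limit;	/* 0 means we timed out */
	}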
struct ssp_device *ssp = drv_data->ssp;
int status = 0;
+ if (drv_data->rx_channel != -1)
+ DRCMR(drv_data->ssp->drcmr_rx) =
+ DRCMR_MAPVLD | drv_data->rx_channel;
+ if (drv_data->tx_channel != -1)
+ DRCMR(drv_data->ssp->drcmr_tx) =
+ DRCMR_MAPVLD | drv_data->tx_channel;
+
/* Enable the SSP clock */
clk_enable(ssp->clk);
writeb(0xff, mmio_base + 0x78c);
chips_hw_init(p);
+ return 0;
}
static int __devinit
static irqreturn_t pxafb_handle_irq(int irq, void *dev_id)
{
struct pxafb_info *fbi = dev_id;
- unsigned int lccr0, lcsr, lcsr1;
+ unsigned int lccr0, lcsr;
lcsr = lcd_readl(fbi, LCSR);
if (lcsr & LCSR_LDD) {
lcd_writel(fbi, LCSR, lcsr);
#ifdef CONFIG_FB_PXA_OVERLAY
- lcsr1 = lcd_readl(fbi, LCSR1);
- if (lcsr1 & LCSR1_BS(1))
- complete(&fbi->overlay[0].branch_done);
+ {
+ unsigned int lcsr1 = lcd_readl(fbi, LCSR1);
+ if (lcsr1 & LCSR1_BS(1))
+ complete(&fbi->overlay[0].branch_done);
- if (lcsr1 & LCSR1_BS(2))
- complete(&fbi->overlay[1].branch_done);
+ if (lcsr1 & LCSR1_BS(2))
+ complete(&fbi->overlay[1].branch_done);
- lcd_writel(fbi, LCSR1, lcsr1);
+ lcd_writel(fbi, LCSR1, lcsr1);
+ }
#endif
return IRQ_HANDLED;
}
{
struct autofs_dirhash *dh = &sbi->dirhash;
struct autofs_dir_ent *ent;
- struct dentry *dentry;
unsigned long timeout = sbi->exp_timeout;
while (1) {
+ struct path path;
+ int umount_ok;
+
if ( list_empty(&dh->expiry_head) || sbi->catatonic )
return NULL; /* No entries */
/* We keep the list sorted by last_usage and want old stuff */
return ent; /* Symlinks are always expirable */
/* Get the dentry for the autofs subdirectory */
- dentry = ent->dentry;
+ path.dentry = ent->dentry;
- if ( !dentry ) {
+ if (!path.dentry) {
/* Should only happen in catatonic mode */
printk("autofs: dentry == NULL but inode range is directory, entry %s\n", ent->name);
autofs_delete_usage(ent);
continue;
}
- if ( !dentry->d_inode ) {
- dput(dentry);
+ if (!path.dentry->d_inode) {
+ dput(path.dentry);
printk("autofs: negative dentry on expiry queue: %s\n",
ent->name);
autofs_delete_usage(ent);
/* Make sure entry is mounted and unused; note that dentry will
point to the mounted-on-top root. */
- if (!S_ISDIR(dentry->d_inode->i_mode)||!d_mountpoint(dentry)) {
+ if (!S_ISDIR(path.dentry->d_inode->i_mode) ||
+ !d_mountpoint(path.dentry)) {
DPRINTK(("autofs: not expirable (not a mounted directory): %s\n", ent->name));
continue;
}
- mntget(mnt);
- dget(dentry);
- if (!follow_down(&mnt, &dentry)) {
- dput(dentry);
- mntput(mnt);
+ path.mnt = mnt;
+ path_get(&path);
+ if (!follow_down(&path.mnt, &path.dentry)) {
+ path_put(&path);
DPRINTK(("autofs: not expirable (not a mounted directory): %s\n", ent->name));
continue;
}
- while (d_mountpoint(dentry) && follow_down(&mnt, &dentry))
+ while (d_mountpoint(path.dentry) &&
+ follow_down(&path.mnt, &path.dentry))
;
- dput(dentry);
+ umount_ok = may_umount(path.mnt);
+ path_put(&path);
- if ( may_umount(mnt) ) {
- mntput(mnt);
+ if (umount_ok) {
DPRINTK(("autofs: signaling expire on %s\n", ent->name));
return ent; /* Expirable! */
}
DPRINTK(("autofs: didn't expire due to may_umount: %s\n", ent->name));
- mntput(mnt);
}
return NULL; /* No expirable entries */
}
* Check a string doesn't overrun the chunk of
* memory we copied from user land.
*/
-static int invalid_str(char *str, void *end)
+static int invalid_str(char *str, size_t size)
{
- while ((void *) str <= end)
- if (!*str++)
- return 0;
+ if (memchr(str, 0, size))
+ return 0;
return -EINVAL;
}
}
if (param->size > sizeof(*param)) {
- err = invalid_str(param->path,
- (void *) ((size_t) param + param->size));
+ err = invalid_str(param->path, param->size - sizeof(*param));
if (err) {
AUTOFS_WARN(
"path string terminator missing for cmd(0x%08x)",
}
path = param->path;
- devid = sbi->sb->s_dev;
+ devid = new_encode_dev(sbi->sb->s_dev);
param->requester.uid = param->requester.gid = -1;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- vol_args = kmalloc(sizeof(*vol_args), GFP_NOFS);
-
- if (!vol_args)
- return -ENOMEM;
-
- if (copy_from_user(vol_args, arg, sizeof(*vol_args))) {
- ret = -EFAULT;
- goto out;
- }
+ vol_args = memdup_user(arg, sizeof(*vol_args));
+ if (IS_ERR(vol_args))
+ return PTR_ERR(vol_args);
vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
namelen = strlen(vol_args->name);
out_unlock:
mutex_unlock(&root->fs_info->volume_mutex);
-out:
kfree(vol_args);
return ret;
}
if (root->fs_info->sb->s_flags & MS_RDONLY)
return -EROFS;
- vol_args = kmalloc(sizeof(*vol_args), GFP_NOFS);
-
- if (!vol_args)
- return -ENOMEM;
-
- if (copy_from_user(vol_args, arg, sizeof(*vol_args))) {
- ret = -EFAULT;
- goto out;
- }
+ vol_args = memdup_user(arg, sizeof(*vol_args));
+ if (IS_ERR(vol_args))
+ return PTR_ERR(vol_args);
vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
namelen = strlen(vol_args->name);
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- vol_args = kmalloc(sizeof(*vol_args), GFP_NOFS);
+ vol_args = memdup_user(arg, sizeof(*vol_args));
+ if (IS_ERR(vol_args))
+ return PTR_ERR(vol_args);
- if (!vol_args)
- return -ENOMEM;
-
- if (copy_from_user(vol_args, arg, sizeof(*vol_args))) {
- ret = -EFAULT;
- goto out;
- }
vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
ret = btrfs_init_new_device(root, vol_args->name);
-out:
kfree(vol_args);
return ret;
}
if (root->fs_info->sb->s_flags & MS_RDONLY)
return -EROFS;
- vol_args = kmalloc(sizeof(*vol_args), GFP_NOFS);
+ vol_args = memdup_user(arg, sizeof(*vol_args));
+ if (IS_ERR(vol_args))
+ return PTR_ERR(vol_args);
- if (!vol_args)
- return -ENOMEM;
-
- if (copy_from_user(vol_args, arg, sizeof(*vol_args))) {
- ret = -EFAULT;
- goto out;
- }
vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
ret = btrfs_rm_device(root, vol_args->name);
-out:
kfree(vol_args);
return ret;
}
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- vol = kmalloc(sizeof(*vol), GFP_KERNEL);
- if (!vol)
- return -ENOMEM;
-
- if (copy_from_user(vol, (void __user *)arg, sizeof(*vol))) {
- ret = -EFAULT;
- goto out;
- }
+ vol = memdup_user((void __user *)arg, sizeof(*vol));
+ if (IS_ERR(vol))
+ return PTR_ERR(vol);
switch (cmd) {
case BTRFS_IOC_SCAN_DEV:
&btrfs_fs_type, &fs_devices);
break;
}
-out:
+
kfree(vol);
return ret;
}
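All of the conversions above replace an open-coded kmalloc() plus copy_from_user() pair with memdup_user(), which returns either a kernel copy of the user buffer or an ERR_PTR encoding -ENOMEM/-EFAULT. A minimal sketch of the idiom; struct example_args and the helper name are hypothetical:

	#include <linux/err.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	struct example_args {
		char name[64];
	};

	static long example_copyin(void __user *arg)
	{
		struct example_args *a;

		a = memdup_user(arg, sizeof(*a));
		if (IS_ERR(a))
			return PTR_ERR(a);	/* -ENOMEM or -EFAULT */

		/* same NUL-termination safety as the ioctls above */
		a->name[sizeof(a->name) - 1] = '\0';
		/* ... use a ... */
		kfree(a);
		return 0;
	}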
struct compat_stat __user *statbuf)
{
struct kstat stat;
- int error = vfs_stat_fd(AT_FDCWD, filename, &stat);
+ int error;
- if (!error)
- error = cp_compat_stat(&stat, statbuf);
- return error;
+ error = vfs_stat(filename, &stat);
+ if (error)
+ return error;
+ return cp_compat_stat(&stat, statbuf);
}
asmlinkage long compat_sys_newlstat(char __user * filename,
struct compat_stat __user *statbuf)
{
struct kstat stat;
- int error = vfs_lstat_fd(AT_FDCWD, filename, &stat);
+ int error;
- if (!error)
- error = cp_compat_stat(&stat, statbuf);
- return error;
+ error = vfs_lstat(filename, &stat);
+ if (error)
+ return error;
+ return cp_compat_stat(&stat, statbuf);
}
#ifndef __ARCH_WANT_STAT64
struct compat_stat __user *statbuf, int flag)
{
struct kstat stat;
- int error = -EINVAL;
-
- if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
- goto out;
-
- if (flag & AT_SYMLINK_NOFOLLOW)
- error = vfs_lstat_fd(dfd, filename, &stat);
- else
- error = vfs_stat_fd(dfd, filename, &stat);
-
- if (!error)
- error = cp_compat_stat(&stat, statbuf);
+ int error;
-out:
- return error;
+ error = vfs_fstatat(dfd, filename, &stat, flag);
+ if (error)
+ return error;
+ return cp_compat_stat(&stat, statbuf);
}
#endif
#include <linux/i2c.h>
#include <linux/i2c-dev.h>
#include <linux/atalk.h>
-#include <linux/loop.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci.h>
#include <linux/gigaset_dev.h>
#ifdef CONFIG_BLOCK
+#include <linux/loop.h>
#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/sg.h>
HANDLE_IOCTL(SONET_GETFRSENSE, do_atm_ioctl)
/* block stuff */
#ifdef CONFIG_BLOCK
+/* loop */
+IGNORE_IOCTL(LOOP_CLR_FD)
/* Raw devices */
HANDLE_IOCTL(RAW_SETBIND, raw_ioctl)
HANDLE_IOCTL(RAW_GETBIND, raw_ioctl)
IGNORE_IOCTL(VFAT_IOCTL_READDIR_BOTH32)
IGNORE_IOCTL(VFAT_IOCTL_READDIR_SHORT32)
-/* loop */
-IGNORE_IOCTL(LOOP_CLR_FD)
-
#ifdef CONFIG_SPARC
/* Sparc framebuffers, handled in sbusfb_compat_ioctl() */
IGNORE_IOCTL(FBIOGTYPE)
int result;
unsigned long seq;
- /* FIXME: This is old behavior, needed? Please check callers. */
if (new_dentry == old_dentry)
return 1;
if (count == 0)
goto out;
- data = kmalloc(count, GFP_KERNEL);
- if (!data) {
- printk(KERN_ERR "%s: Out of memory whilst attempting to "
- "kmalloc([%zd], GFP_KERNEL)\n", __func__, count);
+
+ data = memdup_user(buf, count);
+ if (IS_ERR(data)) {
+ printk(KERN_ERR "%s: memdup_user returned error [%ld]\n",
+ __func__, PTR_ERR(data));
goto out;
}
- rc = copy_from_user(data, buf, count);
- if (rc) {
- printk(KERN_ERR "%s: copy_from_user returned error [%d]\n",
- __func__, rc);
- goto out_free;
- }
sz = count;
i = 0;
switch (data[i++]) {
return retval;
}
-int get_filesystem_list(char * buf)
+int __init get_filesystem_list(char *buf)
{
int len = 0;
struct file_system_type * tmp;
error = filemap_fdatawait(metamapping);
mapping_set_error(metamapping, error);
gfs2_ail_empty_gl(gl);
+ /*
+ * Writeback of the data mapping may cause the dirty flag to be set
+ * so we have to clear it again here.
+ */
+ smp_mb__before_clear_bit();
+ clear_bit(GLF_DIRTY, &gl->gl_flags);
}
/**
gfs2_glock_dq(&gh);
out:
gfs2_holder_uninit(&gh);
- if (ret)
+ if (ret == -ENOMEM)
+ ret = VM_FAULT_OOM;
+ else if (ret)
ret = VM_FAULT_SIGBUS;
return ret;
}
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
-#include <linux/quotaops.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
bad_val:
printk(KERN_ERR "hugetlbfs: Bad value '%s' for mount option '%s'\n",
args[0].from, p);
- return 1;
+ return -EINVAL;
}
static int
int err;
struct qstr this;
+ WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex));
+
err = __lookup_one_len(name, &this, base, len);
if (err)
return ERR_PTR(err);
if (parent_path) {
detach_mnt(source_mnt, parent_path);
attach_mnt(source_mnt, path);
- touch_mnt_namespace(current->nsproxy->mnt_ns);
+ touch_mnt_namespace(parent_path->mnt->mnt_ns);
} else {
mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
commit_tree(source_mnt);
if (user.object_name_len > NCP_OBJECT_NAME_MAX_LEN)
return -ENOMEM;
if (user.object_name_len) {
- newname = kmalloc(user.object_name_len, GFP_USER);
- if (!newname)
- return -ENOMEM;
- if (copy_from_user(newname, user.object_name, user.object_name_len)) {
- kfree(newname);
- return -EFAULT;
- }
+ newname = memdup_user(user.object_name,
+ user.object_name_len);
+ if (IS_ERR(newname))
+ return PTR_ERR(newname);
} else {
newname = NULL;
}
if (user.len > NCP_PRIVATE_DATA_MAX_LEN)
return -ENOMEM;
if (user.len) {
- new = kmalloc(user.len, GFP_USER);
- if (!new)
- return -ENOMEM;
- if (copy_from_user(new, user.data, user.len)) {
- kfree(new);
- return -EFAULT;
- }
+ new = memdup_user(user.data, user.len);
+ if (IS_ERR(new))
+ return PTR_ERR(new);
} else {
new = NULL;
}
if (args->npages != 0)
xdr_encode_pages(buf, args->pages, 0, args->len);
else
- req->rq_slen += args->len;
+ req->rq_slen = xdr_adjust_iovec(req->rq_svec,
+ p + XDR_QUADLEN(args->len));
err = nfsacl_encode(buf, base, args->inode,
(args->mask & NFS_ACL) ?
goto out;
status = vfs_readdir(filp, nfsd4_build_namelist, &names);
fput(filp);
+ mutex_lock(&dir->d_inode->i_mutex);
while (!list_empty(&names)) {
entry = list_entry(names.next, struct name_list, list);
dentry = lookup_one_len(entry->name, dir, HEXDIR_LEN-1);
if (IS_ERR(dentry)) {
status = PTR_ERR(dentry);
- goto out;
+ break;
}
status = f(dir, dentry);
dput(dentry);
if (status)
- goto out;
+ break;
list_del(&entry->list);
kfree(entry);
}
+ mutex_unlock(&dir->d_inode->i_mutex);
out:
while (!list_empty(&names)) {
entry = list_entry(names.next, struct name_list, list);
return status;
}
-static int
-nfsd4_remove_clid_file(struct dentry *dir, struct dentry *dentry)
-{
- int status;
-
- if (!S_ISREG(dir->d_inode->i_mode)) {
- printk("nfsd4: non-file found in client recovery directory\n");
- return -EINVAL;
- }
- mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
- status = vfs_unlink(dir->d_inode, dentry);
- mutex_unlock(&dir->d_inode->i_mutex);
- return status;
-}
-
-static int
-nfsd4_clear_clid_dir(struct dentry *dir, struct dentry *dentry)
-{
- int status;
-
- /* For now this directory should already be empty, but we empty it of
- * any regular files anyway, just in case the directory was created by
- * a kernel from the future.... */
- nfsd4_list_rec_dir(dentry, nfsd4_remove_clid_file);
- mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
- status = vfs_rmdir(dir->d_inode, dentry);
- mutex_unlock(&dir->d_inode->i_mutex);
- return status;
-}
-
static int
nfsd4_unlink_clid_dir(char *name, int namlen)
{
mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
dentry = lookup_one_len(name, rec_dir.dentry, namlen);
- mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
if (IS_ERR(dentry)) {
status = PTR_ERR(dentry);
- return status;
+ goto out_unlock;
}
status = -ENOENT;
if (!dentry->d_inode)
goto out;
-
- status = nfsd4_clear_clid_dir(rec_dir.dentry, dentry);
+ status = vfs_rmdir(rec_dir.dentry->d_inode, dentry);
out:
dput(dentry);
+out_unlock:
+ mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
return status;
}
if (nfs4_has_reclaimed_state(child->d_name.name, false))
return 0;
- status = nfsd4_clear_clid_dir(parent, child);
+ status = vfs_rmdir(parent->d_inode, child);
if (status)
printk("failed to remove client recovery directory %s\n",
child->d_name.name);
}
if ((exp->ex_flags & NFSEXP_CROSSMOUNT) || EX_NOHIDE(exp2)) {
/* successfully crossed mount point */
- exp_put(exp);
- *expp = exp2;
+ /*
+ * This is subtle: dentry is *not* under mnt at this point.
+ * The only reason we are safe is that original mnt is pinned
+ * down by exp, so we should dput before putting exp.
+ */
dput(dentry);
*dpp = mounts;
+ exp_put(exp);
+ *expp = exp2;
} else {
exp_put(exp2);
dput(mounts);
return 0;
}
-static int nfsd_buffered_readdir(struct file *file, filldir_t func,
- struct readdir_cd *cdp, loff_t *offsetp)
+static __be32 nfsd_buffered_readdir(struct file *file, filldir_t func,
+ struct readdir_cd *cdp, loff_t *offsetp)
{
struct readdir_data buf;
struct buffered_dirent *de;
buf.dirent = (void *)__get_free_page(GFP_KERNEL);
if (!buf.dirent)
- return -ENOMEM;
+ return nfserrno(-ENOMEM);
offset = *offsetp;
while (1) {
+ struct inode *dir_inode = file->f_path.dentry->d_inode;
unsigned int reclen;
cdp->err = nfserr_eof; /* will be cleared on successful read */
if (!size)
break;
+ /*
+ * Various filldir functions may end up calling back into
+ * lookup_one_len() and the file system's ->lookup() method.
+ * These expect i_mutex to be held, as it would be during ->readdir().
+ */
+ host_err = mutex_lock_killable(&dir_inode->i_mutex);
+ if (host_err)
+ break;
+
de = (struct buffered_dirent *)buf.dirent;
while (size > 0) {
offset = de->offset;
if (func(cdp, de->name, de->namlen, de->offset,
de->ino, de->d_type))
- goto done;
+ break;
if (cdp->err != nfs_ok)
- goto done;
+ break;
reclen = ALIGN(sizeof(*de) + de->namlen,
sizeof(u64));
size -= reclen;
de = (struct buffered_dirent *)((char *)de + reclen);
}
+ mutex_unlock(&dir_inode->i_mutex);
+ if (size > 0) /* We bailed out early */
+ break;
+
offset = vfs_llseek(file, 0, SEEK_CUR);
}
- done:
free_page((unsigned long)(buf.dirent));
if (host_err)
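
To see why the readdir path now takes i_mutex, consider a hypothetical filldir callback of the kind nfsd passes in (the foo_ name is illustrative, not from the patch): it re-looks-up every name it is handed, and lookup_one_len() requires the directory's i_mutex, as the WARN_ON_ONCE() added earlier in this series now enforces.

static int foo_filldir(void *data, const char *name, int namlen,
		       loff_t offset, u64 ino, unsigned int d_type)
{
	struct dentry *dir = data;
	struct dentry *child;

	/* Safe only because the caller holds dir->d_inode->i_mutex. */
	child = lookup_one_len(name, dir, namlen);
	if (IS_ERR(child))
		return PTR_ERR(child);

	/* ... examine child's inode here ... */
	dput(child);
	return 0;
}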
EXPORT_SYMBOL(vfs_getattr);
-int vfs_stat_fd(int dfd, char __user *name, struct kstat *stat)
+int vfs_fstat(unsigned int fd, struct kstat *stat)
{
- struct path path;
- int error;
+ struct file *f = fget(fd);
+ int error = -EBADF;
- error = user_path_at(dfd, name, LOOKUP_FOLLOW, &path);
- if (!error) {
- error = vfs_getattr(path.mnt, path.dentry, stat);
- path_put(&path);
+ if (f) {
+ error = vfs_getattr(f->f_path.mnt, f->f_path.dentry, stat);
+ fput(f);
}
return error;
}
+EXPORT_SYMBOL(vfs_fstat);
-int vfs_stat(char __user *name, struct kstat *stat)
+int vfs_fstatat(int dfd, char __user *filename, struct kstat *stat, int flag)
{
- return vfs_stat_fd(AT_FDCWD, name, stat);
-}
+ struct path path;
+ int error = -EINVAL;
+ int lookup_flags = 0;
-EXPORT_SYMBOL(vfs_stat);
+ if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
+ goto out;
-int vfs_lstat_fd(int dfd, char __user *name, struct kstat *stat)
-{
- struct path path;
- int error;
+ if (!(flag & AT_SYMLINK_NOFOLLOW))
+ lookup_flags |= LOOKUP_FOLLOW;
- error = user_path_at(dfd, name, 0, &path);
- if (!error) {
- error = vfs_getattr(path.mnt, path.dentry, stat);
- path_put(&path);
- }
+ error = user_path_at(dfd, filename, lookup_flags, &path);
+ if (error)
+ goto out;
+
+ error = vfs_getattr(path.mnt, path.dentry, stat);
+ path_put(&path);
+out:
return error;
}
+EXPORT_SYMBOL(vfs_fstatat);
-int vfs_lstat(char __user *name, struct kstat *stat)
+int vfs_stat(char __user *name, struct kstat *stat)
{
- return vfs_lstat_fd(AT_FDCWD, name, stat);
+ return vfs_fstatat(AT_FDCWD, name, stat, 0);
}
+EXPORT_SYMBOL(vfs_stat);
-EXPORT_SYMBOL(vfs_lstat);
-
-int vfs_fstat(unsigned int fd, struct kstat *stat)
+int vfs_lstat(char __user *name, struct kstat *stat)
{
- struct file *f = fget(fd);
- int error = -EBADF;
-
- if (f) {
- error = vfs_getattr(f->f_path.mnt, f->f_path.dentry, stat);
- fput(f);
- }
- return error;
+ return vfs_fstatat(AT_FDCWD, name, stat, AT_SYMLINK_NOFOLLOW);
}
+EXPORT_SYMBOL(vfs_lstat);
-EXPORT_SYMBOL(vfs_fstat);
#ifdef __ARCH_WANT_OLD_STAT
SYSCALL_DEFINE2(stat, char __user *, filename, struct __old_kernel_stat __user *, statbuf)
{
struct kstat stat;
- int error = vfs_stat_fd(AT_FDCWD, filename, &stat);
+ int error;
- if (!error)
- error = cp_old_stat(&stat, statbuf);
+ error = vfs_stat(filename, &stat);
+ if (error)
+ return error;
- return error;
+ return cp_old_stat(&stat, statbuf);
}
SYSCALL_DEFINE2(lstat, char __user *, filename, struct __old_kernel_stat __user *, statbuf)
{
struct kstat stat;
- int error = vfs_lstat_fd(AT_FDCWD, filename, &stat);
+ int error;
- if (!error)
- error = cp_old_stat(&stat, statbuf);
+ error = vfs_lstat(filename, &stat);
+ if (error)
+ return error;
- return error;
+ return cp_old_stat(&stat, statbuf);
}
SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
SYSCALL_DEFINE2(newstat, char __user *, filename, struct stat __user *, statbuf)
{
struct kstat stat;
- int error = vfs_stat_fd(AT_FDCWD, filename, &stat);
-
- if (!error)
- error = cp_new_stat(&stat, statbuf);
+ int error = vfs_stat(filename, &stat);
- return error;
+ if (error)
+ return error;
+ return cp_new_stat(&stat, statbuf);
}
SYSCALL_DEFINE2(newlstat, char __user *, filename, struct stat __user *, statbuf)
{
struct kstat stat;
- int error = vfs_lstat_fd(AT_FDCWD, filename, &stat);
+ int error;
- if (!error)
- error = cp_new_stat(&stat, statbuf);
+ error = vfs_lstat(filename, &stat);
+ if (error)
+ return error;
- return error;
+ return cp_new_stat(&stat, statbuf);
}
#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
struct stat __user *, statbuf, int, flag)
{
struct kstat stat;
- int error = -EINVAL;
-
- if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
- goto out;
-
- if (flag & AT_SYMLINK_NOFOLLOW)
- error = vfs_lstat_fd(dfd, filename, &stat);
- else
- error = vfs_stat_fd(dfd, filename, &stat);
-
- if (!error)
- error = cp_new_stat(&stat, statbuf);
+ int error;
-out:
- return error;
+ error = vfs_fstatat(dfd, filename, &stat, flag);
+ if (error)
+ return error;
+ return cp_new_stat(&stat, statbuf);
}
#endif
struct stat64 __user *, statbuf, int, flag)
{
struct kstat stat;
- int error = -EINVAL;
-
- if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
- goto out;
-
- if (flag & AT_SYMLINK_NOFOLLOW)
- error = vfs_lstat_fd(dfd, filename, &stat);
- else
- error = vfs_stat_fd(dfd, filename, &stat);
-
- if (!error)
- error = cp_new_stat64(&stat, statbuf);
+ int error;
-out:
- return error;
+ error = vfs_fstatat(dfd, filename, &stat, flag);
+ if (error)
+ return error;
+ return cp_new_stat64(&stat, statbuf);
}
#endif /* __ARCH_WANT_STAT64 */
count = size - offs;
}
- temp = kmalloc(count, GFP_KERNEL);
- if (!temp)
- return -ENOMEM;
-
- if (copy_from_user(temp, userbuf, count)) {
- count = -EFAULT;
- goto out_free;
- }
+ temp = memdup_user(userbuf, count);
+ if (IS_ERR(temp))
+ return PTR_ERR(temp);
mutex_lock(&bb->mutex);
if (count > 0)
*off = offs + count;
-out_free:
- kfree(temp);
return count;
}
if (size) {
if (size > XATTR_SIZE_MAX)
return -E2BIG;
- kvalue = kmalloc(size, GFP_KERNEL);
- if (!kvalue)
- return -ENOMEM;
- if (copy_from_user(kvalue, value, size)) {
- kfree(kvalue);
- return -EFAULT;
- }
+ kvalue = memdup_user(value, size);
+ if (IS_ERR(kvalue))
+ return PTR_ERR(kvalue);
}
error = vfs_setxattr(d, kname, kvalue, size, flags);
if (len > XATTR_SIZE_MAX)
return EINVAL;
- kbuf = kmalloc(len, GFP_KERNEL);
- if (!kbuf)
- return ENOMEM;
-
- if (copy_from_user(kbuf, ubuf, len))
- goto out_kfree;
+ kbuf = memdup_user(ubuf, len);
+ if (IS_ERR(kbuf))
+ return PTR_ERR(kbuf);
error = xfs_attr_set(XFS_I(inode), name, kbuf, len, flags);
- out_kfree:
- kfree(kbuf);
return error;
}
if (!size || size > 16 * PAGE_SIZE)
goto out_dput;
- error = ENOMEM;
- ops = kmalloc(size, GFP_KERNEL);
- if (!ops)
+ ops = memdup_user(am_hreq.ops, size);
+ if (IS_ERR(ops)) {
+ error = PTR_ERR(ops);
goto out_dput;
-
- error = EFAULT;
- if (copy_from_user(ops, am_hreq.ops, size))
- goto out_kfree_ops;
+ }
attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
if (!attr_name)
goto out_kfree_ops;
-
error = 0;
for (i = 0; i < am_hreq.opcount; i++) {
ops[i].am_error = strncpy_from_user(attr_name,
if (!size || size > 16 * PAGE_SIZE)
goto out_dput;
- error = ENOMEM;
- ops = kmalloc(size, GFP_KERNEL);
- if (!ops)
+ ops = memdup_user(compat_ptr(am_hreq.ops), size);
+ if (IS_ERR(ops)) {
+ error = PTR_ERR(ops);
goto out_dput;
-
- error = EFAULT;
- if (copy_from_user(ops, compat_ptr(am_hreq.ops), size))
- goto out_kfree_ops;
+ }
attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
if (!attr_name)
goto out_kfree_ops;
-
error = 0;
for (i = 0; i < am_hreq.opcount; i++) {
ops[i].am_error = strncpy_from_user(attr_name,
* 400-499: Perfect
* The ideal clocksource. A must-use where
* available.
- * @read: returns a cycle value
+ * @read: returns a cycle value; the clocksource itself is passed as the argument
+ * @enable: optional function to enable the clocksource
+ * @disable: optional function to disable the clocksource
* @mask: bitmask for two's complement
* subtraction of non 64 bit counters
* @mult: cycle to nanosecond multiplier (adjusted by NTP)
char *name;
struct list_head list;
int rating;
- cycle_t (*read)(void);
+ cycle_t (*read)(struct clocksource *cs);
+ int (*enable)(struct clocksource *cs);
+ void (*disable)(struct clocksource *cs);
cycle_t mask;
u32 mult;
u32 mult_orig;
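
The payoff of threading the clocksource through ->read() is that a driver with several timer instances can recover its per-device state with container_of() instead of a file-scope global. A hypothetical driver-side sketch (the foo_ names are assumptions):

struct foo_timer {
	void __iomem		*counter;	/* free-running counter register */
	struct clocksource	cs;
};

static cycle_t foo_timer_read(struct clocksource *cs)
{
	/* Map from the callback argument back to the enclosing state. */
	struct foo_timer *t = container_of(cs, struct foo_timer, cs);

	return (cycle_t)readl(t->counter);
}

The jiffies and watchdog conversions below are the mechanical half of the same change.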
*/
static inline cycle_t clocksource_read(struct clocksource *cs)
{
- return cs->read();
+ return cs->read(cs);
+}
+
+/**
+ * clocksource_enable - enable clocksource
+ * @cs: pointer to clocksource
+ *
+ * Enables the specified clocksource. The clocksource callback
+ * function should start up the hardware and set up the mult and
+ * related fields of struct clocksource to reflect the hardware's
+ * capabilities.
+ */
+static inline int clocksource_enable(struct clocksource *cs)
+{
+ return cs->enable ? cs->enable(cs) : 0;
+}
+
+/**
+ * clocksource_disable - disable clocksource
+ * @cs: pointer to clocksource
+ *
+ * Disables the specified clocksource. The clocksource callback
+ * function should power down the now-unused hardware block to
+ * save power.
+ */
+static inline void clocksource_disable(struct clocksource *cs)
+{
+ if (cs->disable)
+ cs->disable(cs);
}
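
A driver whose counter clock can be gated might wire the new hooks up roughly as follows; this is a sketch under the assumption of a foo_timer structure owning a struct clk (none of these names come from the patch):

struct foo_timer {
	struct clk		*clk;
	struct clocksource	cs;
};

static int foo_timer_enable(struct clocksource *cs)
{
	struct foo_timer *t = container_of(cs, struct foo_timer, cs);
	int ret;

	ret = clk_enable(t->clk);
	if (ret)
		return ret;

	/* The rate may have changed while gated; recompute mult. */
	cs->mult = clocksource_hz2mult(clk_get_rate(t->clk), cs->shift);
	return 0;
}

static void foo_timer_disable(struct clocksource *cs)
{
	struct foo_timer *t = container_of(cs, struct foo_timer, cs);

	clk_disable(t->clk);	/* the counter stops; the block can sleep */
}

Note how enable() recomputing mult matches the kernel-doc above: the hardware may come back at a different rate than it had when the clocksource was registered.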
/**
extern int vfs_stat(char __user *, struct kstat *);
extern int vfs_lstat(char __user *, struct kstat *);
-extern int vfs_stat_fd(int dfd, char __user *, struct kstat *);
-extern int vfs_lstat_fd(int dfd, char __user *, struct kstat *);
extern int vfs_fstat(unsigned int, struct kstat *);
+extern int vfs_fstatat(int, char __user *, struct kstat *, int);
extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
unsigned long arg);
int proc_nr_files(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos);
-int get_filesystem_list(char * buf);
+int __init get_filesystem_list(char *buf);
#endif /* __KERNEL__ */
#endif /* _LINUX_FS_H */
response. When you send a
response message, this will
be returned. */
+#define IPMI_OEM_RECV_TYPE 5 /* The response for OEM Channels */
+
/* Note that async events and received commands do not have a completion
code as the first byte of the incoming data, unlike a response. */
#define IPMI_READ_EVENT_MSG_BUFFER_CMD 0x35
#define IPMI_GET_CHANNEL_INFO_CMD 0x42
+/* Bits for BMC global enables. */
+#define IPMI_BMC_RCV_MSG_INTR 0x01
+#define IPMI_BMC_EVT_MSG_INTR 0x02
+#define IPMI_BMC_EVT_MSG_BUFF 0x04
+#define IPMI_BMC_SYS_LOG 0x08
+
#define IPMI_NETFN_STORAGE_REQUEST 0x0a
#define IPMI_NETFN_STORAGE_RESPONSE 0x0b
#define IPMI_ADD_SEL_ENTRY_CMD 0x44
#define IPMI_CHANNEL_MEDIUM_USB1 10
#define IPMI_CHANNEL_MEDIUM_USB2 11
#define IPMI_CHANNEL_MEDIUM_SYSINTF 12
+#define IPMI_CHANNEL_MEDIUM_OEM_MIN 0x60
+#define IPMI_CHANNEL_MEDIUM_OEM_MAX 0x7f
#endif /* __LINUX_IPMI_MSGDEFS_H */
{
struct mem_cgroup *mem;
rcu_read_lock();
- mem = mem_cgroup_from_task((mm)->owner);
+ mem = mem_cgroup_from_task(rcu_dereference((mm)->owner));
rcu_read_unlock();
return cgroup == mem;
}
atomic_t j_wcount; /* count of writers for current commit */
unsigned long j_bcount; /* batch count. allows turning X transactions into 1 */
unsigned long j_first_unflushed_offset; /* first unflushed transactions offset */
- unsigned long j_last_flush_trans_id; /* last fully flushed journal timestamp */
+ unsigned int j_last_flush_trans_id; /* last fully flushed journal timestamp */
struct buffer_head *j_header_bh;
time_t j_trans_start_time; /* time this transaction started */
*/
u16 dma_alignment;
- /* setup mode and clock, etc (spi driver may call many times) */
+ /* Setup mode and clock, etc. (SPI protocol driver may call many times).
+ *
+ * IMPORTANT: this may be called when transfers to another
+ * device are active. DO NOT UPDATE SHARED REGISTERS in ways
+ * which could break those transfers.
+ */
int (*setup)(struct spi_device *spi);
/* bidirectional bulk transfers
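
A controller driver that respects this rule validates and caches the per-device settings in setup(), and programs the shared registers only from the transfer path once that chip select is active. A hypothetical sketch (the foo_ types and the four-chip-select layout are assumptions):

struct foo_chip_cfg {
	u8	mode;
	u8	bits_per_word;
	u32	speed_hz;
};

struct foo_spi {
	struct foo_chip_cfg	cfg[4];	/* one cached config per chip select */
};

static int foo_spi_setup(struct spi_device *spi)
{
	struct foo_spi *fs = spi_master_get_devdata(spi->master);
	struct foo_chip_cfg *cfg = &fs->cfg[spi->chip_select];

	if (spi->bits_per_word < 4 || spi->bits_per_word > 16)
		return -EINVAL;

	/*
	 * Cache only; another device's transfer may be in flight, so the
	 * shared controller registers are written from the transfer path,
	 * after this device is actually selected.
	 */
	cfg->mode = spi->mode;
	cfg->bits_per_word = spi->bits_per_word;
	cfg->speed_hz = spi->max_speed_hz;
	return 0;
}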
asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n" \
"\t.globl ." #alias "\n\t.set ." #alias ", ." #name)
#else
-#ifdef CONFIG_ALPHA
+#if defined(CONFIG_ALPHA) || defined(CONFIG_MIPS)
#define SYSCALL_ALIAS(alias, name) \
asm ( #alias " = " #name "\n\t.globl " #alias)
#else
dentry = dget(path.dentry);
path_put(&path);
- if (dentry == tagged->mnt_root && dentry == mnt->mnt_root)
- follow_up(&mnt, &dentry);
-
list_add_tail(&list, &tagged->mnt_list);
mutex_lock(&audit_filter_mutex);
resumed = test_and_clear_bit(0, &watchdog_resumed);
- wdnow = watchdog->read();
+ wdnow = watchdog->read(watchdog);
wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
watchdog_last = wdnow;
list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
- csnow = cs->read();
+ csnow = cs->read(cs);
if (unlikely(resumed)) {
cs->wd_last = csnow;
list_add(&cs->wd_list, &watchdog_list);
if (!started && watchdog) {
- watchdog_last = watchdog->read();
+ watchdog_last = watchdog->read(watchdog);
watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
add_timer_on(&watchdog_timer,
cpumask_first(cpu_online_mask));
cse->flags &= ~CLOCK_SOURCE_WATCHDOG;
/* Start if list is not empty */
if (!list_empty(&watchdog_list)) {
- watchdog_last = watchdog->read();
+ watchdog_last = watchdog->read(watchdog);
watchdog_timer.expires =
jiffies + WATCHDOG_INTERVAL;
add_timer_on(&watchdog_timer,
*/
#define JIFFIES_SHIFT 8
-static cycle_t jiffies_read(void)
+static cycle_t jiffies_read(struct clocksource *cs)
{
return (cycle_t) jiffies;
}
*/
static void change_clocksource(void)
{
- struct clocksource *new;
+ struct clocksource *new, *old;
new = clocksource_get_next();
clocksource_forward_now();
- new->raw_time = clock->raw_time;
+ if (clocksource_enable(new))
+ return;
+ new->raw_time = clock->raw_time;
+ old = clock;
clock = new;
+ clocksource_disable(old);
+
clock->cycle_last = 0;
- clock->cycle_last = clocksource_read(new);
+ clock->cycle_last = clocksource_read(clock);
clock->error = 0;
clock->xtime_nsec = 0;
clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
ntp_init();
clock = clocksource_get_next();
+ clocksource_enable(clock);
clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
clock->cycle_last = clocksource_read(clock);
/* Can mapped pages be reclaimed? */
int may_unmap;
+ /* Can pages be swapped as part of reclaim? */
+ int may_swap;
+
/* This context's SWAP_CLUSTER_MAX. If freeing memory for
* suspend, we effectively ignore SWAP_CLUSTER_MAX.
* In this context, it doesn't matter that we scan the
struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
/* If we have no swap space, do not bother scanning anon pages. */
- if (nr_swap_pages <= 0) {
+ if (!sc->may_swap || (nr_swap_pages <= 0)) {
percent[0] = 0;
percent[1] = 100;
return;
.may_writepage = !laptop_mode,
.swap_cluster_max = SWAP_CLUSTER_MAX,
.may_unmap = 1,
+ .may_swap = 1,
.swappiness = vm_swappiness,
.order = order,
.mem_cgroup = NULL,
struct scan_control sc = {
.may_writepage = !laptop_mode,
.may_unmap = 1,
+ .may_swap = !noswap,
.swap_cluster_max = SWAP_CLUSTER_MAX,
.swappiness = swappiness,
.order = 0,
};
struct zonelist *zonelist;
- if (noswap)
- sc.may_unmap = 0;
-
sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
zonelist = NODE_DATA(numa_node_id())->node_zonelists;
struct scan_control sc = {
.gfp_mask = GFP_KERNEL,
.may_unmap = 1,
+ .may_swap = 1,
.swap_cluster_max = SWAP_CLUSTER_MAX,
.swappiness = vm_swappiness,
.order = order,
struct scan_control sc = {
.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
+ .may_swap = 1,
.swap_cluster_max = max_t(unsigned long, nr_pages,
SWAP_CLUSTER_MAX),
.gfp_mask = gfp_mask,