(...)
i2c_adap = i2c_get_adapter(2);
memset(&i2c_info, 0, sizeof(struct i2c_board_info));
- strlcpy(i2c_info.type, "isp1301_pnx", I2C_NAME_SIZE);
+ strlcpy(i2c_info.name, "isp1301_pnx", I2C_NAME_SIZE);
isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info,
normal_i2c);
i2c_put_adapter(i2c_adap);
i8042.panicblink=
[HW] Frequency with which keyboard LEDs should blink
when kernel panics (default is 0.5 sec)
- i8042.notimeout [HW] Ignore timeout condition signalled by controller
i8042.reset [HW] Reset the controller during init and cleanup
i8042.unlock [HW] Unlock (ignore) the keylock
disables clocksource verification at runtime.
Used to enable high-resolution timer mode on older
 hardware, and in virtualized environments.
- [x86] noirqtime: Do not use TSC to do irq accounting.
- Used to run time disable IRQ_TIME_ACCOUNTING on any
- platforms where RDTSC is slow and this accounting
- can add overhead.
turbografx.map[2|3]= [HW,JOY]
TurboGraFX parallel port interface
Cirrus Logic CS4206/4207
========================
mbp55 MacBook Pro 5,5
- imac27 IMac 27 Inch
auto BIOS setup (default)
/proc/bus/usb filesystem output
===============================
-(version 2010.09.13)
+(version 2003.05.30)
The usbfs filesystem for USB devices is traditionally mounted at
/proc/bus/usb. It provides the /proc/bus/usb/devices file, as well as
the /proc/bus/usb/BBB/DDD files.
-In many modern systems the usbfs filesystem isn't used at all. Instead
-USB device nodes are created under /dev/usb/ or someplace similar. The
-"devices" file is available in debugfs, typically as
-/sys/kernel/debug/usb/devices.
-
**NOTE**: If /proc/bus/usb appears empty, and a host controller
driver has been linked, then you need to mount the
Topology info:
-T: Bus=dd Lev=dd Prnt=dd Port=dd Cnt=dd Dev#=ddd Spd=dddd MxCh=dd
-| | | | | | | | |__MaxChildren
+T: Bus=dd Lev=dd Prnt=dd Port=dd Cnt=dd Dev#=ddd Spd=ddd MxCh=dd
+| | | | | | | | |__MaxChildren
| | | | | | | |__Device Speed in Mbps
| | | | | | |__DeviceNumber
| | | | | |__Count of devices at this level
Speed may be:
1.5 Mbit/s for low speed USB
12 Mbit/s for full speed USB
- 480 Mbit/s for high speed USB (added for USB 2.0);
- also used for Wireless USB, which has no fixed speed
- 5000 Mbit/s for SuperSpeed USB (added for USB 3.0)
+ 480 Mbit/s for high speed USB (added for USB 2.0)
- For reasons lost in the mists of time, the Port number is always
- too low by 1. For example, a device plugged into port 4 will
- show up with "Port=03".
Bandwidth info:
B: Alloc=ddd/ddd us (xx%), #Int=ddd, #Iso=ddd
an external hub connected to the root hub, and a mouse and
a serial converter connected to the external hub.
-T: Bus=00 Lev=00 Prnt=00 Port=00 Cnt=00 Dev#= 1 Spd=12 MxCh= 2
+T: Bus=00 Lev=00 Prnt=00 Port=00 Cnt=00 Dev#= 1 Spd=12 MxCh= 2
B: Alloc= 28/900 us ( 3%), #Int= 2, #Iso= 0
D: Ver= 1.00 Cls=09(hub ) Sub=00 Prot=00 MxPS= 8 #Cfgs= 1
P: Vendor=0000 ProdID=0000 Rev= 0.00
I: If#= 0 Alt= 0 #EPs= 1 Cls=09(hub ) Sub=00 Prot=00 Driver=hub
E: Ad=81(I) Atr=03(Int.) MxPS= 8 Ivl=255ms
-T: Bus=00 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 2 Spd=12 MxCh= 4
+T: Bus=00 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 2 Spd=12 MxCh= 4
D: Ver= 1.00 Cls=09(hub ) Sub=00 Prot=00 MxPS= 8 #Cfgs= 1
P: Vendor=0451 ProdID=1446 Rev= 1.00
C:* #Ifs= 1 Cfg#= 1 Atr=e0 MxPwr=100mA
I: If#= 0 Alt= 0 #EPs= 1 Cls=09(hub ) Sub=00 Prot=00 Driver=hub
E: Ad=81(I) Atr=03(Int.) MxPS= 1 Ivl=255ms
-T: Bus=00 Lev=02 Prnt=02 Port=00 Cnt=01 Dev#= 3 Spd=1.5 MxCh= 0
+T: Bus=00 Lev=02 Prnt=02 Port=00 Cnt=01 Dev#= 3 Spd=1.5 MxCh= 0
D: Ver= 1.00 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 8 #Cfgs= 1
P: Vendor=04b4 ProdID=0001 Rev= 0.00
C:* #Ifs= 1 Cfg#= 1 Atr=80 MxPwr=100mA
I: If#= 0 Alt= 0 #EPs= 1 Cls=03(HID ) Sub=01 Prot=02 Driver=mouse
E: Ad=81(I) Atr=03(Int.) MxPS= 3 Ivl= 10ms
-T: Bus=00 Lev=02 Prnt=02 Port=02 Cnt=02 Dev#= 4 Spd=12 MxCh= 0
+T: Bus=00 Lev=02 Prnt=02 Port=02 Cnt=02 Dev#= 4 Spd=12 MxCh= 0
D: Ver= 1.00 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 8 #Cfgs= 1
P: Vendor=0565 ProdID=0001 Rev= 1.08
S: Manufacturer=Peracom Networks, Inc.
Selecting only the "T:" and "I:" lines from this (for example, by using
"procusb ti"), we have:
-T: Bus=00 Lev=00 Prnt=00 Port=00 Cnt=00 Dev#= 1 Spd=12 MxCh= 2
-T: Bus=00 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 2 Spd=12 MxCh= 4
+T: Bus=00 Lev=00 Prnt=00 Port=00 Cnt=00 Dev#= 1 Spd=12 MxCh= 2
+T: Bus=00 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 2 Spd=12 MxCh= 4
I: If#= 0 Alt= 0 #EPs= 1 Cls=09(hub ) Sub=00 Prot=00 Driver=hub
-T: Bus=00 Lev=02 Prnt=02 Port=00 Cnt=01 Dev#= 3 Spd=1.5 MxCh= 0
+T: Bus=00 Lev=02 Prnt=02 Port=00 Cnt=01 Dev#= 3 Spd=1.5 MxCh= 0
I: If#= 0 Alt= 0 #EPs= 1 Cls=03(HID ) Sub=01 Prot=02 Driver=mouse
-T: Bus=00 Lev=02 Prnt=02 Port=02 Cnt=02 Dev#= 4 Spd=12 MxCh= 0
+T: Bus=00 Lev=02 Prnt=02 Port=02 Cnt=02 Dev#= 4 Spd=12 MxCh= 0
I: If#= 0 Alt= 0 #EPs= 3 Cls=00(>ifc ) Sub=00 Prot=00 Driver=serial
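For illustration only (this sketch is not part of the kernel tree or of this patch): a minimal user-space C program that prints just the T: and I: lines, similar in spirit to "procusb ti", could look like this:

#include <stdio.h>
#include <string.h>

/* Sketch: echo only the topology ("T:") and interface ("I:") lines
 * from the documented /proc/bus/usb/devices file. */
int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/bus/usb/devices", "r");

	if (!f) {
		perror("/proc/bus/usb/devices");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "T:", 2) || !strncmp(line, "I:", 2))
			fputs(line, stdout);
	fclose(f);
	return 0;
}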
STABLE BRANCH
M: Greg Kroah-Hartman <greg@kroah.com>
+M: Chris Wright <chrisw@sous-sol.org>
L: stable@kernel.org
S: Maintained
unsigned long handler = (unsigned long)ka->sa.sa_handler;
unsigned long retcode;
int thumb = 0;
- unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
-
- cpsr |= PSR_ENDSTATE;
+ unsigned long cpsr = regs->ARM_cpsr & ~PSR_f;
/*
* Maybe we need to deliver a 32-bit signal to a 26-bit task.
long err;
int i;
- if (nsops < 1 || nsops > SEMOPM)
+ if (nsops < 1)
return -EINVAL;
sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
if (!sops)
void default_idle(void);
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void account_system_vtime(struct task_struct *);
+#endif
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
data = mca_bootmem();
first_time = 0;
} else
- data = (void *)__get_free_pages(GFP_KERNEL,
- get_order(sz));
+ data = __get_free_pages(GFP_KERNEL, get_order(sz));
if (!data)
panic("Could not allocate MCA memory for cpu %d\n",
cpu);
* use the GART mapped mode.
*/
static u64
-tioca_dma_map(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags)
+tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
{
u64 mapaddr;
zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
free_area_init_node(i, zones_size,
m68k_memory[i].addr >> PAGE_SHIFT, NULL);
- if (node_present_pages(i))
- node_set_state(i, N_NORMAL_MEMORY);
}
}
#include <linux/mtd/physmap.h>
#include <mtd/mtd-abi.h>
-#include <asm/mach-au1x00/au1xxx_eth.h>
-
static struct gpio_keys_button mtx1_gpio_button[] = {
{
.gpio = 207,
&mtx1_mtd,
};
-static struct au1000_eth_platform_data mtx1_au1000_eth0_pdata = {
- .phy_search_highest_addr = 1,
- .phy1_search_mac0 = 1,
-};
-
static int __init mtx1_register_devices(void)
{
int rc;
- au1xxx_override_eth_cfg(0, &mtx1_au1000_eth0_pdata);
-
rc = gpio_request(mtx1_gpio_button[0].gpio,
mtx1_gpio_button[0].desc);
if (rc < 0) {
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
- gfp_t dma_flag;
-
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
-#ifdef CONFIG_ISA
+#ifdef CONFIG_ZONE_DMA
if (dev == NULL)
- dma_flag = __GFP_DMA;
+ gfp |= __GFP_DMA;
+ else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
+ gfp |= __GFP_DMA;
else
#endif
-#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
+#ifdef CONFIG_ZONE_DMA32
if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
- dma_flag = __GFP_DMA;
- else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
- dma_flag = __GFP_DMA32;
- else
-#endif
-#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
- if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
- dma_flag = __GFP_DMA32;
- else
-#endif
-#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
- if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
- dma_flag = __GFP_DMA;
+ gfp |= __GFP_DMA32;
else
#endif
- dma_flag = 0;
+ ;
/* Don't invoke OOM killer */
gfp |= __GFP_NORETRY;
- return gfp | dma_flag;
+ return gfp;
}
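For context only, not part of this patch: the zone selection above keys off the device's coherent_dma_mask, which a driver establishes before allocating. A minimal, hypothetical sketch (function and buffer size invented for illustration) could be:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical sketch: a device limited to 32-bit addressing declares that
 * via coherent_dma_mask, so the allocation path (massage_gfp_flags() above)
 * can steer the buffer into a suitable zone. */
static void *example_alloc_buffer(struct device *dev, dma_addr_t *handle)
{
	dev->coherent_dma_mask = DMA_BIT_MASK(32);
	return dma_alloc_coherent(dev, 4096, handle, GFP_KERNEL);
}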
void *dma_alloc_noncoherent(struct device *dev, size_t size,
unsigned int i;
unsigned long flags;
- for (i = 0; i < count;) {
+ for (i = 0; i < count && i < 79;) {
switch(str[i]) {
case '\n':
iodc_dbuf[i+0] = '\r';
iodc_dbuf[i+1] = '\n';
i += 2;
goto print;
+ case '\b': /* BS */
+ i--; /* overwrite last */
default:
iodc_dbuf[i] = str[i];
i++;
}
}
+ /* if we're at the end of line, and not already inserting a newline,
+ * insert one anyway. iodc console doesn't claim to support >79 char
+ * lines. don't account for this in the return value.
+ */
+ if (i == 79 && iodc_dbuf[i-1] != '\n') {
+ iodc_dbuf[i+0] = '\r';
+ iodc_dbuf[i+1] = '\n';
+ }
+
print:
spin_lock_irqsave(&pdc_lock, flags);
real32_call(PAGE0->mem_cons.iodc_io,
int cpu_dest;
/* timer and ipi have to always be received on all CPUs */
- if (CHECK_IRQ_PER_CPU(irq_to_desc(irq)->status)) {
+ if (CHECK_IRQ_PER_CPU(irq)) {
/* Bad linux design decision. The mask has already
* been set; we must reset it */
cpumask_setall(irq_desc[irq].affinity);
}
memset(pfnnid_map, 0xff, sizeof(pfnnid_map));
- for (i = 0; i < npmem_ranges; i++) {
- node_set_state(i, N_NORMAL_MEMORY);
+ for (i = 0; i < npmem_ranges; i++)
node_set_online(i);
- }
#endif
/*
extra-installed := $(patsubst $(obj)/%, $(DESTDIR)$(WRAPPER_OBJDIR)/%, $(extra-y))
hostprogs-installed := $(patsubst %, $(DESTDIR)$(WRAPPER_BINDIR)/%, $(hostprogs-y))
wrapper-installed := $(DESTDIR)$(WRAPPER_BINDIR)/wrapper
-dts-installed := $(patsubst $(dtstree)/%, $(DESTDIR)$(WRAPPER_DTSDIR)/%, $(wildcard $(dtstree)/*.dts))
+dts-installed := $(patsubst $(obj)/dts/%, $(DESTDIR)$(WRAPPER_DTSDIR)/%, $(wildcard $(obj)/dts/*.dts))
all-installed := $(extra-installed) $(hostprogs-installed) $(wrapper-installed) $(dts-installed)
*/
#define PLPAR_HCALL9_BUFSIZE 9
long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...);
-long plpar_hcall9_raw(unsigned long opcode, unsigned long *retbuf, ...);
/* For hcall instrumentation. One structure per-hcall, per-CPU */
struct hcall_stats {
#define KEXEC_ARCH KEXEC_ARCH_PPC
#endif
-#define KEXEC_STATE_NONE 0
-#define KEXEC_STATE_IRQS_OFF 1
-#define KEXEC_STATE_REAL_MODE 2
-
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
#include <asm/reg.h>
struct lppaca *lppaca_ptr; /* Pointer to LpPaca for PLIC */
#endif /* CONFIG_PPC_BOOK3S */
/*
- * MAGIC: the spinlock functions in arch/powerpc/lib/locks.c
+ * MAGIC: the spinlock functions in arch/powerpc/lib/locks.c
* load lock_token and paca_index with a single lwz
* instruction. They must travel together and be properly
* aligned.
s16 hw_cpu_id; /* Physical processor number */
u8 cpu_start; /* At startup, processor spins until */
/* this becomes non-zero. */
- u8 kexec_state; /* set when kexec down has irqs off */
#ifdef CONFIG_PPC_STD_MMU_64
struct slb_shadow *slb_shadow_ptr;
#define PV_970 0x0039
#define PV_POWER5 0x003A
#define PV_POWER5p 0x003B
-#define PV_POWER7 0x003F
#define PV_970FX 0x003C
#define PV_630 0x0040
#define PV_630p 0x0041
#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x)))
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void account_system_vtime(struct task_struct *);
+#endif
+
extern struct dentry *powerpc_debugfs_root;
#endif /* __KERNEL__ */
#endif /* CONFIG_PPC_STD_MMU_64 */
DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
- DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state));
DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr));
DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
#include <asm/mmu.h>
_GLOBAL(__setup_cpu_603)
- mflr r5
+ mflr r4
BEGIN_MMU_FTR_SECTION
li r10,0
mtspr SPRN_SPRG_603_LRU,r10 /* init SW LRU tracking */
bl __init_fpu_registers
END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
bl setup_common_caches
- mtlr r5
+ mtlr r4
blr
_GLOBAL(__setup_cpu_604)
- mflr r5
+ mflr r4
bl setup_common_caches
bl setup_604_hid0
- mtlr r5
+ mtlr r4
blr
_GLOBAL(__setup_cpu_750)
- mflr r5
+ mflr r4
bl __init_fpu_registers
bl setup_common_caches
bl setup_750_7400_hid0
- mtlr r5
+ mtlr r4
blr
_GLOBAL(__setup_cpu_750cx)
- mflr r5
+ mflr r4
bl __init_fpu_registers
bl setup_common_caches
bl setup_750_7400_hid0
bl setup_750cx
- mtlr r5
+ mtlr r4
blr
_GLOBAL(__setup_cpu_750fx)
- mflr r5
+ mflr r4
bl __init_fpu_registers
bl setup_common_caches
bl setup_750_7400_hid0
bl setup_750fx
- mtlr r5
+ mtlr r4
blr
_GLOBAL(__setup_cpu_7400)
- mflr r5
+ mflr r4
bl __init_fpu_registers
bl setup_7400_workarounds
bl setup_common_caches
bl setup_750_7400_hid0
- mtlr r5
+ mtlr r4
blr
_GLOBAL(__setup_cpu_7410)
- mflr r5
+ mflr r4
bl __init_fpu_registers
bl setup_7410_workarounds
bl setup_common_caches
bl setup_750_7400_hid0
li r3,0
mtspr SPRN_L2CR2,r3
- mtlr r5
+ mtlr r4
blr
_GLOBAL(__setup_cpu_745x)
- mflr r5
+ mflr r4
bl setup_common_caches
bl setup_745x_specifics
- mtlr r5
+ mtlr r4
blr
/* Enable caches for 603's, 604, 750 & 7400 */
cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
cror 4*cr0+eq,4*cr0+eq,4*cr2+eq
bnelr
- lwz r6,CPU_SPEC_FEATURES(r4)
+ lwz r6,CPU_SPEC_FEATURES(r5)
li r7,CPU_FTR_CAN_NAP
andc r6,r6,r7
- stw r6,CPU_SPEC_FEATURES(r4)
+ stw r6,CPU_SPEC_FEATURES(r5)
blr
/* 750fx specific
andis. r11,r11,L3CR_L3E@h
beq 1f
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
- lwz r6,CPU_SPEC_FEATURES(r4)
+ lwz r6,CPU_SPEC_FEATURES(r5)
andi. r0,r6,CPU_FTR_L3_DISABLE_NAP
beq 1f
li r7,CPU_FTR_CAN_NAP
andc r6,r6,r7
- stw r6,CPU_SPEC_FEATURES(r4)
+ stw r6,CPU_SPEC_FEATURES(r5)
1:
mfspr r11,SPRN_HID0
/* Leave the IPI callback set */
}
-/* wait for all the CPUs to hit real mode but timeout if they don't come in */
-#ifdef CONFIG_PPC_STD_MMU_64
-static void crash_kexec_wait_realmode(int cpu)
-{
- unsigned int msecs;
- int i;
-
- msecs = 10000;
- for (i=0; i < NR_CPUS && msecs > 0; i++) {
- if (i == cpu)
- continue;
-
- while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) {
- barrier();
- if (!cpu_possible(i)) {
- break;
- }
- if (!cpu_online(i)) {
- break;
- }
- msecs--;
- mdelay(1);
- }
- }
- mb();
-}
-#endif
-
/*
* This function will be called by secondary cpus or by kexec cpu
* if soft-reset is activated to stop some CPUs.
EXPORT_SYMBOL(crash_shutdown_unregister);
static unsigned long crash_shutdown_buf[JMP_BUF_LEN];
-static int crash_shutdown_cpu = -1;
static int handle_fault(struct pt_regs *regs)
{
- if (crash_shutdown_cpu == smp_processor_id())
- longjmp(crash_shutdown_buf, 1);
+ longjmp(crash_shutdown_buf, 1);
return 0;
}
for_each_irq(i) {
struct irq_desc *desc = irq_desc + i;
- if (!desc || !desc->chip || !desc->chip->eoi)
- continue;
-
if (desc->status & IRQ_INPROGRESS)
desc->chip->eoi(i);
if (!(desc->status & IRQ_DISABLED))
- desc->chip->shutdown(i);
+ desc->chip->disable(i);
}
/*
*/
old_handler = __debugger_fault_handler;
__debugger_fault_handler = handle_fault;
- crash_shutdown_cpu = smp_processor_id();
for (i = 0; crash_shutdown_handles[i]; i++) {
if (setjmp(crash_shutdown_buf) == 0) {
/*
asm volatile("sync; isync");
}
}
- crash_shutdown_cpu = -1;
__debugger_fault_handler = old_handler;
/*
crash_kexec_prepare_cpus(crashing_cpu);
cpu_set(crashing_cpu, cpus_in_crash);
crash_kexec_stop_spus();
-#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
- crash_kexec_wait_realmode(crashing_cpu);
-#endif
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(1, 0);
}
#include <linux/thread_info.h>
#include <linux/init_task.h>
#include <linux/errno.h>
-#include <linux/cpu.h>
#include <asm/page.h>
#include <asm/current.h>
#ifdef CONFIG_SMP
-static int kexec_all_irq_disabled;
-
+/* FIXME: we should schedule this function to be called on all cpus based
+ * on calling the interrupts, but we would like to call it off irq level
+ * so that the interrupt controller is clean.
+ */
static void kexec_smp_down(void *arg)
{
- local_irq_disable();
- mb(); /* make sure our irqs are disabled before we say they are */
- get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
- while (kexec_all_irq_disabled == 0)
- cpu_relax();
- mb(); /* make sure all irqs are disabled before this */
- /*
- * Now every CPU has IRQs off, we can clear out any pending
- * IPIs and be sure that no more will come in after this.
- */
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(0, 1);
+ local_irq_disable();
kexec_smp_wait();
/* NOTREACHED */
}
-/*
- * We need to make sure each present CPU is online. The next kernel will scan
- * the device tree and assume primary threads are online and query secondary
- * threads via RTAS to online them if required. If we don't online primary
- * threads, they will be stuck. However, we also online secondary threads as we
- * may be using 'cede offline'. In this case RTAS doesn't see the secondary
- * threads as offline -- and again, these CPUs will be stuck.
- *
- * So, we online all CPUs that should be running, including secondary threads.
- */
-static void wake_offline_cpus(void)
-{
- int cpu = 0;
-
- for_each_present_cpu(cpu) {
- if (!cpu_online(cpu)) {
- printk(KERN_INFO "kexec: Waking offline cpu %d.\n",
- cpu);
- cpu_up(cpu);
- }
- }
-}
-
-static void kexec_prepare_cpus_wait(int wait_state)
+static void kexec_prepare_cpus(void)
{
int my_cpu, i, notified=-1;
- wake_offline_cpus();
+ smp_call_function(kexec_smp_down, NULL, /* wait */0);
my_cpu = get_cpu();
- /* Make sure each CPU has at least made it to the state we need */
+
+ /* check the other cpus are now down (via paca hw cpu id == -1) */
for (i=0; i < NR_CPUS; i++) {
if (i == my_cpu)
continue;
- while (paca[i].kexec_state < wait_state) {
+ while (paca[i].hw_cpu_id != -1) {
barrier();
if (!cpu_possible(i)) {
printk("kexec: cpu %d hw_cpu_id %d is not"
}
if (i != notified) {
printk( "kexec: waiting for cpu %d (physical"
- " %d) to enter %i state\n",
- i, paca[i].hw_cpu_id, wait_state);
+ " %d) to go down\n",
+ i, paca[i].hw_cpu_id);
notified = i;
}
}
}
- mb();
-}
-
-static void kexec_prepare_cpus(void)
-{
-
- smp_call_function(kexec_smp_down, NULL, /* wait */0);
- local_irq_disable();
- mb(); /* make sure IRQs are disabled before we say they are */
- get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
-
- kexec_prepare_cpus_wait(KEXEC_STATE_IRQS_OFF);
- /* we are sure every CPU has IRQs off at this point */
- kexec_all_irq_disabled = 1;
/* after we tell the others to go down */
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(0, 0);
-/* Before removing MMU mapings make sure all CPUs have entered real mode */
- kexec_prepare_cpus_wait(KEXEC_STATE_REAL_MODE);
-
put_cpu();
+
+ local_irq_disable();
}
#else /* ! SMP */
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
-#include <asm/kexec.h>
.text
1: mflr r5
addi r5,r5,kexec_flag-1b
- li r4,KEXEC_STATE_REAL_MODE
- stb r4,PACAKEXECSTATE(r13)
- SYNC
-
99: HMT_LOW
#ifdef CONFIG_KEXEC /* use no memory without kexec */
lwz r4,0(r5)
* note: this is a terminal routine, it does not save lr
*
* get phys id from paca
+ * set paca id to -1 to say we got here
* switch to real mode
* join other cpus in kexec_wait(phys_id)
*/
_GLOBAL(kexec_smp_wait)
lhz r3,PACAHWCPUID(r13)
+ li r4,-1
+ sth r4,PACAHWCPUID(r13) /* let others know we left */
bl real_mode
b .kexec_wait
#include <asm/paca.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
-#include <asm/kexec.h>
/* This symbol is provided by the linker - let it fill in the paca
* field correctly */
new_paca->kernelbase = (unsigned long) _stext;
new_paca->kernel_msr = MSR_KERNEL;
new_paca->hw_cpu_id = 0xffff;
- new_paca->kexec_state = KEXEC_STATE_NONE;
new_paca->__current = &init_task;
#ifdef CONFIG_PPC_STD_MMU_64
new_paca->slb_shadow_ptr = &slb_shadow[cpu];
return ip;
}
-static bool pmc_overflow(unsigned long val)
-{
- if ((int)val < 0)
- return true;
-
- /*
- * Events on POWER7 can roll back if a speculative event doesn't
- * eventually complete. Unfortunately in some rare cases they will
- * raise a performance monitor exception. We need to catch this to
- * ensure we reset the PMC. In all cases the PMC will be 256 or less
- * cycles from overflow.
- *
- * We only do this if the first pass fails to find any overflowing
- * PMCs because a user might set a period of less than 256 and we
- * don't want to mistakenly reset them.
- */
- if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256))
- return true;
-
- return false;
-}
-
/*
* Performance monitor interrupt stuff
*/
if (is_limited_pmc(i + 1))
continue;
val = read_pmc(i + 1);
- if (pmc_overflow(val))
+ if ((int)val < 0)
write_pmc(i + 1, 0);
}
}
struct flash_block_list *next;
struct flash_block blocks[FLASH_BLOCKS_PER_NODE];
};
+struct flash_block_list_header { /* just the header of flash_block_list */
+ unsigned long num_blocks;
+ struct flash_block_list *next;
+};
-static struct flash_block_list *rtas_firmware_flash_list;
+static struct flash_block_list_header rtas_firmware_flash_list = {0, NULL};
/* Use slab cache to guarantee 4k alignment */
static struct kmem_cache *flash_block_cache = NULL;
/* Local copy of the flash block list.
* We only allow one open of the flash proc file and create this
- * list as we go. The rtas_firmware_flash_list variable will be
- * set once the data is fully read.
+ * list as we go. This list will be put in the
+ * rtas_firmware_flash_list var once it is fully read.
*
* For convenience as we build the list we use virtual addrs,
* we do not fill in the version number, and the length field
* is treated as the number of entries currently in the block
- * (i.e. not a byte count). This is all fixed when calling
- * the flash routine.
+ * (i.e. not a byte count). This is all fixed on release.
*/
/* Status int must be first member of struct */
if (uf->flist) {
/* File was opened in write mode for a new flash attempt */
/* Clear saved list */
- if (rtas_firmware_flash_list) {
- free_flash_list(rtas_firmware_flash_list);
- rtas_firmware_flash_list = NULL;
+ if (rtas_firmware_flash_list.next) {
+ free_flash_list(rtas_firmware_flash_list.next);
+ rtas_firmware_flash_list.next = NULL;
}
if (uf->status != FLASH_AUTH)
uf->status = flash_list_valid(uf->flist);
if (uf->status == FLASH_IMG_READY)
- rtas_firmware_flash_list = uf->flist;
+ rtas_firmware_flash_list.next = uf->flist;
else
free_flash_list(uf->flist);
unsigned long rtas_block_list;
int i, status, update_token;
- if (rtas_firmware_flash_list == NULL)
+ if (rtas_firmware_flash_list.next == NULL)
return; /* nothing to do */
if (reboot_type != SYS_RESTART) {
return;
}
- /*
- * NOTE: the "first" block must be under 4GB, so we create
- * an entry with no data blocks in the reserved buffer in
- * the kernel data segment.
+ /* NOTE: the "first" block list is a global var with no data
+ * blocks in the kernel data segment. We do this because
+ * we want to ensure this block_list addr is under 4GB.
*/
- spin_lock(&rtas_data_buf_lock);
- flist = (struct flash_block_list *)&rtas_data_buf[0];
- flist->num_blocks = 0;
- flist->next = rtas_firmware_flash_list;
+ rtas_firmware_flash_list.num_blocks = 0;
+ flist = (struct flash_block_list *)&rtas_firmware_flash_list;
rtas_block_list = virt_to_abs(flist);
if (rtas_block_list >= 4UL*1024*1024*1024) {
printk(KERN_ALERT "FLASH: kernel bug...flash list header addr above 4GB\n");
- spin_unlock(&rtas_data_buf_lock);
return;
}
printk(KERN_ALERT "FLASH: preparing saved firmware image for flash\n");
/* Update the block_list in place. */
- rtas_firmware_flash_list = NULL; /* too hard to backout on error */
image_size = 0;
for (f = flist; f; f = next) {
/* Translate data addrs to absolute */
printk(KERN_ALERT "FLASH: unknown flash return code %d\n", status);
break;
}
- spin_unlock(&rtas_data_buf_lock);
}
static void remove_flash_pde(struct proc_dir_entry *dp)
DBG(" <- setup_system()\n");
}
-static u64 slb0_limit(void)
-{
- if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
- return 1UL << SID_SHIFT_1T;
- }
- return 1UL << SID_SHIFT;
-}
-
#ifdef CONFIG_IRQSTACKS
static void __init irqstack_early_init(void)
{
- u64 limit = slb0_limit();
unsigned int i;
/*
for_each_possible_cpu(i) {
softirq_ctx[i] = (struct thread_info *)
__va(lmb_alloc_base(THREAD_SIZE,
- THREAD_SIZE, limit));
+ THREAD_SIZE, 0x10000000));
hardirq_ctx[i] = (struct thread_info *)
__va(lmb_alloc_base(THREAD_SIZE,
- THREAD_SIZE, limit));
+ THREAD_SIZE, 0x10000000));
}
}
#else
*/
static void __init emergency_stack_init(void)
{
- u64 limit;
+ unsigned long limit;
unsigned int i;
/*
* bringup, we need to get at them in real mode. This means they
* must also be within the RMO region.
*/
- limit = min(slb0_limit(), lmb.rmo_size);
+ limit = min(0x10000000ULL, lmb.rmo_size);
for_each_possible_cpu(i) {
unsigned long sp;
mtcrf 0xff,r0
blr /* return r3 = status */
-
-/* See plpar_hcall_raw to see why this is needed */
-_GLOBAL(plpar_hcall9_raw)
- HMT_MEDIUM
-
- mfcr r0
- stw r0,8(r1)
-
- std r4,STK_PARM(r4)(r1) /* Save ret buffer */
-
- mr r4,r5
- mr r5,r6
- mr r6,r7
- mr r7,r8
- mr r8,r9
- mr r9,r10
- ld r10,STK_PARM(r11)(r1) /* put arg7 in R10 */
- ld r11,STK_PARM(r12)(r1) /* put arg8 in R11 */
- ld r12,STK_PARM(r13)(r1) /* put arg9 in R12 */
-
- HVSC /* invoke the hypervisor */
-
- mr r0,r12
- ld r12,STK_PARM(r4)(r1)
- std r4, 0(r12)
- std r5, 8(r12)
- std r6, 16(r12)
- std r7, 24(r12)
- std r8, 32(r12)
- std r9, 40(r12)
- std r10,48(r12)
- std r11,56(r12)
- std r0, 64(r12)
-
- lwz r0,8(r1)
- mtcrf 0xff,r0
-
- blr /* return r3 = status */
{
unsigned long size_bytes = 1UL << ppc64_pft_size;
unsigned long hpte_count = size_bytes >> 4;
- struct {
- unsigned long pteh;
- unsigned long ptel;
- } ptes[4];
+ unsigned long dummy1, dummy2, dword0;
long lpar_rc;
- int i, j;
+ int i;
- /* Read in batches of 4,
- * invalidate only valid entries not in the VRMA
- * hpte_count will be a multiple of 4
- */
- for (i = 0; i < hpte_count; i += 4) {
- lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
- if (lpar_rc != H_SUCCESS)
- continue;
- for (j = 0; j < 4; j++){
- if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
- HPTE_V_VRMA_MASK)
- continue;
- if (ptes[j].pteh & HPTE_V_VALID)
- plpar_pte_remove_raw(0, i + j, 0,
- &(ptes[j].pteh), &(ptes[j].ptel));
+ /* TODO: Use bulk call */
+ for (i = 0; i < hpte_count; i++) {
+ /* dont remove HPTEs with VRMA mappings */
+ lpar_rc = plpar_pte_remove_raw(H_ANDCOND, i, HPTE_V_1TB_SEG,
+ &dummy1, &dummy2);
+ if (lpar_rc == H_NOT_FOUND) {
+ lpar_rc = plpar_pte_read_raw(0, i, &dword0, &dummy1);
+ if (!lpar_rc && ((dword0 & HPTE_V_VRMA_MASK)
+ != HPTE_V_VRMA_MASK))
+ /* Can be hpte for 1TB Seg. So remove it */
+ plpar_pte_remove_raw(0, i, 0, &dummy1, &dummy2);
}
}
}
return rc;
}
-/*
- * plpar_pte_read_4_raw can be called in real mode.
- * ptes must be 8*sizeof(unsigned long)
- */
-static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex,
- unsigned long *ptes)
-
-{
- long rc;
- unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
-
- rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);
-
- memcpy(ptes, retbuf, 8*sizeof(unsigned long));
-
- return rc;
-}
-
static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
unsigned long avpn)
{
if (dsr & DOORBELL_DSR_QFI) {
pr_info("RIO: doorbell queue full\n");
out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI);
+ goto out;
}
/* XXX Need to check/dispatch until queue empty */
*/
extern unsigned long thread_saved_pc(struct task_struct *t);
+/*
+ * Print register of task into buffer. Used in fs/proc/array.c.
+ */
+extern void task_show_regs(struct seq_file *m, struct task_struct *task);
+
extern void show_code(struct pt_regs *regs);
unsigned long get_wchan(struct task_struct *p);
extern void account_vtime(struct task_struct *, struct task_struct *);
extern void account_tick_vtime(struct task_struct *);
+extern void account_system_vtime(struct task_struct *);
#ifdef CONFIG_PFAULT
extern void pfault_irq_init(void);
#define VDSO32_LBASE 0
#define VDSO64_LBASE 0
-#define VDSO_VERSION_STRING LINUX_2.6.29
+#define VDSO_VERSION_STRING LINUX_2.6.26
#ifndef __ASSEMBLY__
show_last_breaking_event(regs);
}
+/* This is called from fs/proc/array.c */
+void task_show_regs(struct seq_file *m, struct task_struct *task)
+{
+ struct pt_regs *regs;
+
+ regs = task_pt_regs(task);
+ seq_printf(m, "task: %p, ksp: %p\n",
+ task, (void *)task->thread.ksp);
+ seq_printf(m, "User PSW : %p %p\n",
+ (void *) regs->psw.mask, (void *)regs->psw.addr);
+
+ seq_printf(m, "User GPRS: " FOURLONG,
+ regs->gprs[0], regs->gprs[1],
+ regs->gprs[2], regs->gprs[3]);
+ seq_printf(m, " " FOURLONG,
+ regs->gprs[4], regs->gprs[5],
+ regs->gprs[6], regs->gprs[7]);
+ seq_printf(m, " " FOURLONG,
+ regs->gprs[8], regs->gprs[9],
+ regs->gprs[10], regs->gprs[11]);
+ seq_printf(m, " " FOURLONG,
+ regs->gprs[12], regs->gprs[13],
+ regs->gprs[14], regs->gprs[15]);
+ seq_printf(m, "User ACRS: %08x %08x %08x %08x\n",
+ task->thread.acrs[0], task->thread.acrs[1],
+ task->thread.acrs[2], task->thread.acrs[3]);
+ seq_printf(m, " %08x %08x %08x %08x\n",
+ task->thread.acrs[4], task->thread.acrs[5],
+ task->thread.acrs[6], task->thread.acrs[7]);
+ seq_printf(m, " %08x %08x %08x %08x\n",
+ task->thread.acrs[8], task->thread.acrs[9],
+ task->thread.acrs[10], task->thread.acrs[11]);
+ seq_printf(m, " %08x %08x %08x %08x\n",
+ task->thread.acrs[12], task->thread.acrs[13],
+ task->thread.acrs[14], task->thread.acrs[15]);
+}
+
static DEFINE_SPINLOCK(die_lock);
void die(const char * str, struct pt_regs * regs, long err)
making when dealing with multi-core CPU chips at a cost of slightly
increased overhead in some places. If unsure say N here.
-config IRQ_TIME_ACCOUNTING
- bool "Fine granularity task level IRQ time accounting"
- default n
- ---help---
- Select this option to enable fine granularity task irq time
- accounting. This is done by reading a timestamp on each
- transitions between softirq and hardirq state, so there can be a
- small performance impact.
-
- If in doubt, say N here.
-
source "kernel/Kconfig.preempt"
config X86_UP_APIC
extern int acpi_pci_disabled;
extern int acpi_skip_timer_override;
extern int acpi_use_timer_override;
-extern int acpi_fix_pin2_polarity;
extern u8 acpi_sci_flags;
extern int acpi_sci_override_gsi;
#define APIC_DEST_LOGICAL 0x00800
#define APIC_DEST_PHYSICAL 0x00000
#define APIC_DM_FIXED 0x00000
-#define APIC_DM_FIXED_MASK 0x00700
#define APIC_DM_LOWEST 0x00100
#define APIC_DM_SMI 0x00200
#define APIC_DM_REMRD 0x00300
struct page *time_page;
bool singlestep; /* guest is single stepped by KVM */
- u64 last_guest_tsc;
- u64 last_kernel_ns;
-
bool nmi_pending;
bool nmi_injected;
unsigned cpu = smp_processor_id();
if (likely(prev != next)) {
+ /* stop flush ipis for the previous mm */
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
#ifdef CONFIG_SMP
percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
percpu_write(cpu_tlbstate.active_mm, next);
/* Re-load page tables */
load_cr3(next->pgd);
- /* stop flush ipis for the previous mm */
- cpumask_clear_cpu(cpu, mm_cpumask(prev));
-
/*
* load the LDT, if the LDT is different:
*/
#define MSR_IA32_MC0_ADDR 0x00000402
#define MSR_IA32_MC0_MISC 0x00000403
-#define MSR_AMD64_MC0_MASK 0xc0010044
-
#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x))
#define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x))
#define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x))
#define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x))
-#define MSR_AMD64_MCx_MASK(x) (MSR_AMD64_MC0_MASK + (x))
-
/* These are consecutive and not in the normal 4er MCE bank block */
#define MSR_IA32_MC0_CTL2 0x00000280
#define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x))
static inline void pud_clear(pud_t *pudp)
{
+ unsigned long pgd;
+
set_pud(pudp, __pud(0));
/*
* section 8.1: in PAE mode we explicitly have to flush the
* TLB via cr3 if the top-level pgd is changed...
*
- * Currently all places where pud_clear() is called either have
- * flush_tlb_mm() followed or don't need TLB flush (x86_64 code or
- * pud_clear_bad()), so we don't need TLB flush here.
+ * Make sure the pud entry we're updating is within the
+ * current pgd to avoid unnecessary TLB flushes.
*/
+ pgd = read_cr3();
+ if (__pa(pudp) >= pgd && __pa(pudp) <
+ (pgd + sizeof(pgd_t)*PTRS_PER_PGD))
+ write_cr3(pgd);
}
#ifdef CONFIG_SMP
return ratio;
}
-/*
- * AMD errata checking
- */
-#ifdef CONFIG_CPU_SUP_AMD
-extern const int amd_erratum_400[];
-extern bool cpu_has_amd_erratum(const int *);
-
-#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 }
-#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 }
-#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
- ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
-#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff)
-#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff)
-#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff)
-
-#else
-#define cpu_has_amd_erratum(x) (false)
-#endif /* CONFIG_CPU_SUP_AMD */
-
#endif /* _ASM_X86_PROCESSOR_H */
void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
struct pvclock_vcpu_time_info *vcpu,
struct timespec *ts);
-void pvclock_resume(void);
-
-/*
- * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
- * yielding a 64-bit result.
- */
-static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
-{
- u64 product;
-#ifdef __i386__
- u32 tmp1, tmp2;
-#endif
-
- if (shift < 0)
- delta >>= -shift;
- else
- delta <<= shift;
-
-#ifdef __i386__
- __asm__ (
- "mul %5 ; "
- "mov %4,%%eax ; "
- "mov %%edx,%4 ; "
- "mul %5 ; "
- "xor %5,%5 ; "
- "add %4,%%eax ; "
- "adc %5,%%edx ; "
- : "=A" (product), "=r" (tmp1), "=r" (tmp2)
- : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
-#elif defined(__x86_64__)
- __asm__ (
- "mul %%rdx ; shrd $32,%%rdx,%%rax"
- : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
-#else
-#error implement me!
-#endif
-
- return product;
-}
#endif /* _ASM_X86_PVCLOCK_H */
*/
CMOS_WRITE(0, 0xf);
- *((volatile u32 *)phys_to_virt(apic->trampoline_phys_low)) = 0;
+ *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0;
}
static inline void __init smpboot_setup_io_apic(void)
int acpi_sci_override_gsi __initdata;
int acpi_skip_timer_override __initdata;
int acpi_use_timer_override __initdata;
-int acpi_fix_pin2_polarity __initdata;
#ifdef CONFIG_X86_LOCAL_APIC
static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
return 0;
}
- if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
- if (acpi_skip_timer_override) {
- printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
- return 0;
- }
- if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
- intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
- printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
- }
+ if (acpi_skip_timer_override &&
+ intsrc->source_irq == 0 && intsrc->global_irq == 2) {
+ printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
+ return 0;
}
mp_override_legacy_irq(intsrc->source_irq,
}
}
#endif
-
- /* As a rule processors have APIC timer running in deep C states */
- if (c->x86 > 0xf && !cpu_has_amd_erratum(amd_erratum_400))
- set_cpu_cap(c, X86_FEATURE_ARAT);
-
- /*
- * Disable GART TLB Walk Errors on Fam10h. We do this here
- * because this is always needed when GART is enabled, even in a
- * kernel which has no MCE support built in.
- */
- if (c->x86 == 0x10) {
- /*
- * BIOS should disable GartTlbWlk Errors itself. If it
- * doesn't, do it here as suggested by the BKDG.
- *
- * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
- */
- u64 mask;
-
- rdmsrl(MSR_AMD64_MCx_MASK(4), mask);
- mask |= (1 << 10);
- wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
- }
}
#ifdef CONFIG_X86_32
};
cpu_dev_register(amd_cpu_dev);
-
-/*
- * AMD errata checking
- *
- * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
- * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
- * have an OSVW id assigned, which it takes as first argument. Both take a
- * variable number of family-specific model-stepping ranges created by
- * AMD_MODEL_RANGE(). Each erratum also has to be declared as extern const
- * int[] in arch/x86/include/asm/processor.h.
- *
- * Example:
- *
- * const int amd_erratum_319[] =
- * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
- * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
- * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
- */
-
-const int amd_erratum_400[] =
- AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
- AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
-
-
-bool cpu_has_amd_erratum(const int *erratum)
-{
- struct cpuinfo_x86 *cpu = &current_cpu_data;
- int osvw_id = *erratum++;
- u32 range;
- u32 ms;
-
- /*
- * If called early enough that current_cpu_data hasn't been initialized
- * yet, fall back to boot_cpu_data.
- */
- if (cpu->x86 == 0)
- cpu = &boot_cpu_data;
-
- if (cpu->x86_vendor != X86_VENDOR_AMD)
- return false;
-
- if (osvw_id >= 0 && osvw_id < 65536 &&
- cpu_has(cpu, X86_FEATURE_OSVW)) {
- u64 osvw_len;
-
- rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
- if (osvw_id < osvw_len) {
- u64 osvw_bits;
-
- rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
- osvw_bits);
- return osvw_bits & (1ULL << (osvw_id & 0x3f));
- }
- }
-
- /* OSVW unavailable or ID unknown, match family-model-stepping range */
- ms = (cpu->x86_model << 4) | cpu->x86_mask;
- while ((range = *erratum++))
- if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
- (ms >= AMD_MODEL_RANGE_START(range)) &&
- (ms <= AMD_MODEL_RANGE_END(range)))
- return true;
-
- return false;
-}
out_free:
if (b) {
kobject_put(&b->kobj);
- list_del(&b->miscj);
kfree(b);
}
return err;
*/
rdmsr(MSR_IA32_MISC_ENABLE, l, h);
- h = lvtthmr_init;
/*
* The initial value of thermal LVT entries on all APs always reads
* 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI
* sequence to them and LVT registers are reset to 0s except for
* the mask bits which are set to 1s when APs receive INIT IPI.
- * If BIOS takes over the thermal interrupt and sets its interrupt
- * delivery mode to SMI (not fixed), it restores the value that the
- * BIOS has programmed on AP based on BSP's info we saved since BIOS
- * is always setting the same value for all threads/cores.
+ * Always restore the value that BIOS has programmed on AP based on
+ * BSP's info we saved since BIOS is always setting the same value
+ * for all threads/cores
*/
- if ((h & APIC_DM_FIXED_MASK) != APIC_DM_FIXED)
- apic_write(APIC_LVTTHMR, lvtthmr_init);
+ apic_write(APIC_LVTTHMR, lvtthmr_init);
+ h = lvtthmr_init;
if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
printk(KERN_DEBUG
/*
* HACK!
- *
- * We use this same function to initialize the mtrrs during boot,
- * resume, runtime cpu online and on an explicit request to set a
- * specific MTRR.
- *
- * During boot or suspend, the state of the boot cpu's mtrrs has been
- * saved, and we want to replicate that across all the cpus that come
- * online (either at the end of boot or resume or during a runtime cpu
- * online). If we're doing that, @reg is set to something special and on
- * this cpu we still do mtrr_if->set_all(). During boot/resume, this
- * is unnecessary if at this point we are still on the cpu that started
- * the boot/resume sequence. But there is no guarantee that we are still
- * on the same cpu. So we do mtrr_if->set_all() on this cpu as well to be
- * sure that we are in sync with everyone else.
+ * We use this same function to initialize the mtrrs on boot.
+ * The state of the boot cpu's mtrrs has been saved, and we want
+ * to replicate across all the APs.
+ * If we're doing that @reg is set to something special...
*/
if (reg != ~0U)
mtrr_if->set(reg, base, size, type);
- else
+ else if (!mtrr_aps_delayed_init)
mtrr_if->set_all();
/* Wait for the others */
}
/*
- * Delayed MTRR initialization for all AP's
+ * MTRR initialization for all AP's
*/
void mtrr_aps_init(void)
{
if (!use_intel())
return;
- /*
- * Check if someone has requested the delay of AP MTRR initialization,
- * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
- * then we are done.
- */
- if (!mtrr_aps_delayed_init)
- return;
-
set_mtrr(~0U, 0, 0, 0);
mtrr_aps_delayed_init = false;
}
if (!p)
return -EINVAL;
- if (!strcmp(p, "nopentium")) {
#ifdef CONFIG_X86_32
+ if (!strcmp(p, "nopentium")) {
setup_clear_cpu_cap(X86_FEATURE_PSE);
return 0;
-#else
- printk(KERN_WARNING "mem=nopentium ignored! (only supported on x86_32)\n");
- return -EINVAL;
-#endif
}
+#endif
userdef = 1;
mem_size = memparse(p, &p);
- /* don't remove all of memory when handling "mem={invalid}" param */
- if (mem_size == 0)
- return -EINVAL;
e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1);
return 0;
static u32 __init ati_sbx00_rev(int num, int slot, int func)
{
- u32 d;
+ u32 old, d;
+ d = read_pci_config(num, slot, func, 0x70);
+ old = d;
+ d &= ~(1<<8);
+ write_pci_config(num, slot, func, 0x70, d);
d = read_pci_config(num, slot, func, 0x8);
d &= 0xff;
+ write_pci_config(num, slot, func, 0x70, old);
return d;
}
{
u32 d, rev;
- rev = ati_sbx00_rev(num, slot, func);
- if (rev >= 0x40)
- acpi_fix_pin2_polarity = 1;
-
- /*
- * SB600: revisions 0x11, 0x12, 0x13, 0x14, ...
- * SB700: revisions 0x39, 0x3a, ...
- * SB800: revisions 0x40, 0x41, ...
- */
- if (rev >= 0x39)
+ if (acpi_use_timer_override)
return;
- if (acpi_use_timer_override)
+ rev = ati_sbx00_rev(num, slot, func);
+ if (rev > 0x13)
return;
/* check for IRQ0 interrupt swap */
decl PER_CPU_VAR(irq_count)
jmp error_exit
CFI_ENDPROC
-END(xen_do_hypervisor_callback)
+END(do_hypervisor_callback)
/*
* Hypervisor uses this for application faults while it executes.
if (hpet_msi_disable)
return;
- if (boot_cpu_has(X86_FEATURE_ARAT))
- return;
id = hpet_readl(HPET_ID);
num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
if (id & HPET_ID_LEGSUP) {
hpet_legacy_clockevent_register();
+ hpet_msi_capability_lookup(2);
return 1;
}
+ hpet_msi_capability_lookup(0);
return 0;
out_nohpet:
if (!hpet_virt_address)
return -ENODEV;
- if (hpet_readl(HPET_ID) & HPET_ID_LEGSUP)
- hpet_msi_capability_lookup(2);
- else
- hpet_msi_capability_lookup(0);
-
hpet_reserve_platform_timers(hpet_readl(HPET_ID));
hpet_print_config();
if (hpet_msi_disable)
return 0;
- if (boot_cpu_has(X86_FEATURE_ARAT))
- return 0;
-
for_each_online_cpu(cpu) {
hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
}
unsigned int mpb[0];
};
+#define UCODE_MAX_SIZE 2048
#define UCODE_CONTAINER_SECTION_HDR 8
#define UCODE_CONTAINER_HEADER_SIZE 12
return 0;
}
- if (mc_header->processor_rev_id != equiv_cpu_id)
+ if (mc_header->processor_rev_id != equiv_cpu_id) {
+ printk(KERN_ERR "microcode: CPU%d: patch mismatch "
+ "(processor_rev_id: %x, equiv_cpu_id: %x)\n",
+ cpu, mc_header->processor_rev_id, equiv_cpu_id);
return 0;
+ }
/* ucode might be chipset specific -- currently we don't support this */
if (mc_header->nb_dev_id || mc_header->sb_dev_id) {
return 1;
}
-static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size)
-{
- struct cpuinfo_x86 *c = &cpu_data(cpu);
- unsigned int max_size, actual_size;
-
-#define F1XH_MPB_MAX_SIZE 2048
-#define F14H_MPB_MAX_SIZE 1824
-#define F15H_MPB_MAX_SIZE 4096
-
- switch (c->x86) {
- case 0x14:
- max_size = F14H_MPB_MAX_SIZE;
- break;
- case 0x15:
- max_size = F15H_MPB_MAX_SIZE;
- break;
- default:
- max_size = F1XH_MPB_MAX_SIZE;
- break;
- }
-
- actual_size = buf[4] + (buf[5] << 8);
-
- if (actual_size > size || actual_size > max_size) {
- pr_err("section size mismatch\n");
- return 0;
- }
-
- return actual_size;
-}
-
static int apply_microcode_amd(int cpu)
{
u32 rev, dummy;
}
static void *
-get_next_ucode(int cpu, const u8 *buf, unsigned int size, unsigned int *mc_size)
+get_next_ucode(const u8 *buf, unsigned int size, unsigned int *mc_size)
{
- unsigned int actual_size = 0;
+ unsigned int total_size;
u8 section_hdr[UCODE_CONTAINER_SECTION_HDR];
- void *mc = NULL;
+ void *mc;
if (get_ucode_data(section_hdr, buf, UCODE_CONTAINER_SECTION_HDR))
return NULL;
return NULL;
}
- actual_size = verify_ucode_size(cpu, buf, size);
- if (!actual_size)
- return NULL;
+ total_size = (unsigned long) (section_hdr[4] + (section_hdr[5] << 8));
- mc = vmalloc(actual_size);
- if (!mc)
- return NULL;
+ printk(KERN_DEBUG "microcode: size %u, total_size %u\n",
+ size, total_size);
- memset(mc, 0, actual_size);
- get_ucode_data(mc, buf + UCODE_CONTAINER_SECTION_HDR, actual_size);
- *mc_size = actual_size + UCODE_CONTAINER_SECTION_HDR;
+ if (total_size > size || total_size > UCODE_MAX_SIZE) {
+ printk(KERN_ERR "microcode: error: size mismatch\n");
+ return NULL;
+ }
+ mc = vmalloc(UCODE_MAX_SIZE);
+ if (mc) {
+ memset(mc, 0, UCODE_MAX_SIZE);
+ if (get_ucode_data(mc, buf + UCODE_CONTAINER_SECTION_HDR,
+ total_size)) {
+ vfree(mc);
+ mc = NULL;
+ } else
+ *mc_size = total_size + UCODE_CONTAINER_SECTION_HDR;
+ }
return mc;
}
unsigned int uninitialized_var(mc_size);
struct microcode_header_amd *mc_header;
- mc = get_next_ucode(cpu, ucode_ptr, leftover, &mc_size);
+ mc = get_next_ucode(ucode_ptr, leftover, &mc_size);
if (!mc)
break;
return (edx & MWAIT_EDX_C1);
}
+/*
+ * Check for AMD CPUs, where APIC timer interrupt does not wake up CPU from C1e.
+ * For more information see
+ * - Erratum #400 for NPT family 0xf and family 0x10 CPUs
+ * - Erratum #365 for family 0x11 (not affected because C1e not in use)
+ */
+static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
+{
+ u64 val;
+ if (c->x86_vendor != X86_VENDOR_AMD)
+ goto no_c1e_idle;
+
+ /* Family 0x0f models < rev F do not have C1E */
+ if (c->x86 == 0x0F && c->x86_model >= 0x40)
+ return 1;
+
+ if (c->x86 == 0x10) {
+ /*
+ * check OSVW bit for CPUs that are not affected
+ * by erratum #400
+ */
+ if (cpu_has(c, X86_FEATURE_OSVW)) {
+ rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
+ if (val >= 2) {
+ rdmsrl(MSR_AMD64_OSVW_STATUS, val);
+ if (!(val & BIT(1)))
+ goto no_c1e_idle;
+ }
+ }
+ return 1;
+ }
+
+no_c1e_idle:
+ return 0;
+}
+
static cpumask_var_t c1e_mask;
static int c1e_detected;
*/
printk(KERN_INFO "using mwait in idle threads.\n");
pm_idle = mwait_idle;
- } else if (cpu_has_amd_erratum(amd_erratum_400)) {
- /* E400: APIC timer interrupt does not wake up CPU from C1e */
+ } else if (check_c1e_idle(c)) {
printk(KERN_INFO "using C1E aware idle routine\n");
pm_idle = c1e_idle;
} else
static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow)
{
u64 delta = native_read_tsc() - shadow->tsc_timestamp;
- return pvclock_scale_delta(delta, shadow->tsc_to_nsec_mul,
- shadow->tsc_shift);
+ return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
}
/*
static atomic64_t last_value = ATOMIC64_INIT(0);
-void pvclock_resume(void)
-{
- atomic64_set(&last_value, 0);
-}
-
cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
{
struct pvclock_shadow_time shadow;
__setup("notsc", notsc_setup);
-static int no_sched_irq_time;
-
static int __init tsc_setup(char *str)
{
if (!strcmp(str, "reliable"))
tsc_clocksource_reliable = 1;
- if (!strncmp(str, "noirqtime", 9))
- no_sched_irq_time = 1;
return 1;
}
if (!tsc_unstable) {
tsc_unstable = 1;
sched_clock_stable = 0;
- disable_sched_clock_irqtime();
printk(KERN_INFO "Marking TSC unstable due to %s\n", reason);
/* Change only the rating, when not registered */
if (clocksource_tsc.mult)
/* now allow native_sched_clock() to use rdtsc */
tsc_disabled = 0;
- if (!no_sched_irq_time)
- enable_sched_clock_irqtime();
-
lpj = ((u64)tsc_khz * 1000);
do_div(lpj, HZ);
lpj_fine = lpj;
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
-#include <asm/pvclock.h>
#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS \
struct kvm_vcpu_arch *vcpu = &v->arch;
void *shared_kaddr;
unsigned long this_tsc_khz;
- s64 kernel_ns, max_kernel_ns;
- u64 tsc_timestamp;
if ((!vcpu->time_page))
return;
/* Keep irq disabled to prevent changes to the clock */
local_irq_save(flags);
- kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp);
+ kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
ktime_get_ts(&ts);
monotonic_to_bootbased(&ts);
- kernel_ns = timespec_to_ns(&ts);
local_irq_restore(flags);
- /*
- * Time as measured by the TSC may go backwards when resetting the base
- * tsc_timestamp. The reason for this is that the TSC resolution is
- * higher than the resolution of the other clock scales. Thus, many
- * possible measurements of the TSC correspond to one measurement of any
- * other clock, and so a spread of values is possible. This is not a
- * problem for the computation of the nanosecond clock; with TSC rates
- * around 1GHZ, there can only be a few cycles which correspond to one
- * nanosecond value, and any path through this code will inevitably
- * take longer than that. However, with the kernel_ns value itself,
- * the precision may be much lower, down to HZ granularity. If the
- * first sampling of TSC against kernel_ns ends in the low part of the
- * range, and the second in the high end of the range, we can get:
- *
- * (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new
- *
- * As the sampling errors potentially range in the thousands of cycles,
- * it is possible such a time value has already been observed by the
- * guest. To protect against this, we must compute the system time as
- * observed by the guest and ensure the new system time is greater.
- */
- max_kernel_ns = 0;
- if (vcpu->hv_clock.tsc_timestamp && vcpu->last_guest_tsc) {
- max_kernel_ns = vcpu->last_guest_tsc -
- vcpu->hv_clock.tsc_timestamp;
- max_kernel_ns = pvclock_scale_delta(max_kernel_ns,
- vcpu->hv_clock.tsc_to_system_mul,
- vcpu->hv_clock.tsc_shift);
- max_kernel_ns += vcpu->last_kernel_ns;
- }
-
- if (max_kernel_ns > kernel_ns)
- kernel_ns = max_kernel_ns;
-
/* With all the info we got, fill in the values */
- vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
- vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
- vcpu->last_kernel_ns = kernel_ns;
- vcpu->last_guest_tsc = tsc_timestamp;
+ vcpu->hv_clock.system_time = ts.tv_nsec +
+ (NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset;
/*
* The interface expects us to write an even number signaling that the
kvm_x86_ops->prepare_guest_switch(vcpu);
kvm_load_guest_fpu(vcpu);
- kvm_get_msr(vcpu, MSR_IA32_TSC, &vcpu->arch.last_guest_tsc);
-
local_irq_disable();
clear_bit(KVM_REQ_KICK, &vcpu->requests);
*/
#ifdef CONFIG_SMP
ENTRY(__write_lock_failed)
- CFI_STARTPROC
+ CFI_STARTPROC simple
FRAME
2: LOCK_PREFIX
addl $ RW_LOCK_BIAS,(%eax)
unsigned long address, unsigned int fault)
{
if (fault & VM_FAULT_OOM) {
- /* Kernel mode? Handle exceptions or die: */
- if (!(error_code & PF_USER)) {
- up_read(&current->mm->mmap_sem);
- no_context(regs, error_code, address);
- return;
- }
-
out_of_memory(regs, error_code, address);
} else {
if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON))
* section 8.1: in PAE mode we explicitly have to flush the
* TLB via cr3 if the top-level pgd is changed...
*/
- flush_tlb_mm(mm);
+ if (mm == current->active_mm)
+ write_cr3(read_cr3());
}
#else /* !CONFIG_X86_PAE */
for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
pte_t pte;
+ if (pfn > max_pfn_mapped)
+ max_pfn_mapped = pfn;
+
if (!pte_none(pte_page[pteidx]))
continue;
pud_t *l3;
pmd_t *l2;
- /* max_pfn_mapped is the last pfn mapped in the initial memory
- * mappings. Considering that on Xen after the kernel mappings we
- * have the mappings of some pages that don't exist in pfn space, we
- * set max_pfn_mapped to the last real pfn mapped. */
- max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
-
/* Zap identity mapping */
init_level4_pgt[0] = __pgd(0);
{
pmd_t *kernel_pmd;
- max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
+ max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
+ xen_start_info->nr_pt_frames * PAGE_SIZE +
+ 512*1024);
kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
{
int cpu;
- pvclock_resume();
-
if (xen_clockevent != &xen_vcpuop_clockevent)
return;
return ret;
ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
- if (ret < 0) {
- blk_trace_remove_sysfs(dev);
+ if (ret < 0)
return ret;
- }
kobject_uevent(&q->kobj, KOBJ_ADD);
{ PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
- { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
- { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
- { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
- { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */
- { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
{
struct ata_device *dev = qc->dev;
+ if (ata_tag_internal(qc->tag))
+ return;
+
if (ata_is_nodata(qc->tf.protocol))
return;
if (unlikely(qc->err_mask))
qc->flags |= ATA_QCFLAG_FAILED;
- /*
- * Finish internal commands without any further processing
- * and always with the result TF filled.
- */
- if (unlikely(ata_tag_internal(qc->tag))) {
- fill_result_tf(qc);
- __ata_qc_complete(qc);
- return;
- }
-
- /*
- * Non-internal qc has failed. Fill the result TF and
- * summon EH.
- */
if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
+ /* always fill result TF for failed qc */
fill_result_tf(qc);
- ata_qc_schedule_eh(qc);
+
+ if (!ata_tag_internal(qc->tag))
+ ata_qc_schedule_eh(qc);
+ else
+ __ata_qc_complete(qc);
return;
}
/* configure max sectors */
blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);
- sdev->sector_size = ATA_SECT_SIZE;
-
if (dev->class == ATA_DEV_ATAPI) {
struct request_queue *q = sdev->request_queue;
void *buf;
- /* set DMA padding */
+ /* set the min alignment and padding */
+ blk_queue_update_dma_alignment(sdev->request_queue,
+ ATA_DMA_PAD_SZ - 1);
blk_queue_update_dma_pad(sdev->request_queue,
ATA_DMA_PAD_SZ - 1);
blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
} else {
+ /* ATA devices must be sector aligned */
+ blk_queue_update_dma_alignment(sdev->request_queue,
+ ATA_SECT_SIZE - 1);
sdev->manage_start_stop = 1;
}
- /*
- * ata_pio_sectors() expects buffer for each sector to not cross
- * page boundary. Enforce it by requiring buffers to be sector
- * aligned, which works iff sector_size is not larger than
- * PAGE_SIZE. ATAPI devices also need the alignment as
- * IDENTIFY_PACKET is executed as ATA_PROT_PIO.
- */
- if (sdev->sector_size > PAGE_SIZE)
- ata_dev_printk(dev, KERN_WARNING,
- "sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
- sdev->sector_size);
-
- blk_queue_update_dma_alignment(sdev->request_queue,
- sdev->sector_size - 1);
-
if (dev->flags & ATA_DFLAG_AN)
set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
};
static struct ata_port_operations mpc52xx_ata_port_ops = {
- .inherits = &ata_bmdma_port_ops,
+ .inherits = &ata_sff_port_ops,
.sff_dev_select = mpc52xx_ata_dev_select,
.set_piomode = mpc52xx_ata_set_piomode,
.set_dmamode = mpc52xx_ata_set_dmamode,
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
static void svia_noop_freeze(struct ata_port *ap);
static int vt6420_prereset(struct ata_link *link, unsigned long deadline);
-static void vt6420_bmdma_start(struct ata_queued_cmd *qc);
static int vt6421_pata_cable_detect(struct ata_port *ap);
static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev);
static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev);
.inherits = &svia_base_ops,
.freeze = svia_noop_freeze,
.prereset = vt6420_prereset,
- .bmdma_start = vt6420_bmdma_start,
};
static struct ata_port_operations vt6421_pata_ops = {
return 0;
}
-static void vt6420_bmdma_start(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- if ((qc->tf.command == ATA_CMD_PACKET) &&
- (qc->scsicmd->sc_data_direction == DMA_TO_DEVICE)) {
- /* Prevents corruption on some ATAPI burners */
- ata_sff_pause(ap);
- }
- ata_bmdma_start(qc);
-}
-
static int vt6421_pata_cable_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
size);
}
if (atmdebug) {
- dev_info(&card->dev->dev, "Received: port %d\n", port);
+ dev_info(&card->dev->dev, "Received: device %d\n", port);
dev_info(&card->dev->dev, "size: %d VPI: %d VCI: %d\n",
size, le16_to_cpu(header->vpi),
le16_to_cpu(header->vci));
/* Clean up and free oldskb now it's gone */
if (atmdebug) {
- struct pkt_hdr *header = (void *)oldskb->data;
- int size = le16_to_cpu(header->size);
-
- skb_pull(oldskb, sizeof(*header));
dev_info(&card->dev->dev, "Transmitted: port %d\n",
port);
- dev_info(&card->dev->dev, "size: %d VPI: %d VCI: %d\n",
- size, le16_to_cpu(header->vpi),
- le16_to_cpu(header->vci));
print_buffer(oldskb);
}
printk("Sending %x - down to controller\n", c->busaddr );
#endif /* CCISS_DEBUG */
writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
- readl(h->vaddr + SA5_REQUEST_PORT_OFFSET);
h->commands_outstanding++;
if ( h->commands_outstanding > h->max_outstanding)
h->max_outstanding = h->commands_outstanding;
pkt_shrink_pktlist(pd);
}
-static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
+static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor)
{
if (dev_minor >= MAX_WRITERS)
return NULL;
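The hunk above changes the minor-number parameter back from unsigned int to int while keeping the single upper-bound test. A small hedged sketch, in plain userspace C with illustrative names, of why the unsigned variant also rejects negative callers with that one comparison:

#include <stdio.h>

#define MAX_WRITERS 8

/* Not the driver code: the same ">= MAX_WRITERS" test rejects negative
 * inputs only when the parameter type is unsigned. */
static int in_range_signed(int minor)        { return !(minor >= MAX_WRITERS); }
static int in_range_unsigned(unsigned minor) { return !(minor >= MAX_WRITERS); }

int main(void)
{
        /* prints "1 0": the signed check accepts -1, the unsigned one rejects it */
        printf("%d %d\n", in_range_signed(-1), in_range_unsigned((unsigned)-1));
        return 0;
}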
/* Generic Bluetooth USB device */
{ USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
- /* Apple MacBookPro 7,1 */
- { USB_DEVICE(0x05ac, 0x8213) },
-
/* Apple iMac11,1 */
{ USB_DEVICE(0x05ac, 0x8215) },
- /* Apple MacBookPro6,2 */
- { USB_DEVICE(0x05ac, 0x8218) },
-
- /* Apple MacBookAir3,1, MacBookAir3,2 */
- { USB_DEVICE(0x05ac, 0x821b) },
-
- /* Apple MacBookPro8,2 */
- { USB_DEVICE(0x05ac, 0x821a) },
-
/* AVM BlueFRITZ! USB v2.0 */
{ USB_DEVICE(0x057c, 0x3800) },
struct agp_memory *new;
unsigned long alloc_size = num_agp_pages*sizeof(struct page *);
- if (INT_MAX/sizeof(struct page *) < num_agp_pages)
- return NULL;
-
new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
if (new == NULL)
return NULL;
int scratch_pages;
struct agp_memory *new;
size_t i;
- int cur_memory;
if (!bridge)
return NULL;
- cur_memory = atomic_read(&bridge->current_memory_agp);
- if ((cur_memory + page_count > bridge->max_memory_agp) ||
- (cur_memory + page_count < page_count))
+ if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
return NULL;
if (type >= AGP_USER_TYPES) {
return -EINVAL;
}
- if (((pg_start + mem->page_count) > num_entries) ||
- ((pg_start + mem->page_count) < pg_start))
+ /* AK: could wrap */
+ if ((pg_start + mem->page_count) > num_entries)
return -EINVAL;
j = pg_start;
{
size_t i;
struct agp_bridge_data *bridge;
- int mask_type, num_entries;
+ int mask_type;
bridge = mem->bridge;
if (!bridge)
if (type != mem->type)
return -EINVAL;
- num_entries = agp_num_entries();
- if (((pg_start + mem->page_count) > num_entries) ||
- ((pg_start + mem->page_count) < pg_start))
- return -EINVAL;
-
mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
if (mask_type != 0) {
/* The generic routines know nothing of memory types */
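The AGP hunks above add and remove two defensive checks: one bounds the page-array multiplication against INT_MAX, the other detects wrap-around in the pg_start + page_count sum before it is compared with num_entries. A minimal sketch of both patterns, outside the kernel and with illustrative names and sample values:

#include <limits.h>
#include <stddef.h>
#include <stdbool.h>
#include <stdio.h>

static bool mul_would_overflow(size_t nmemb, size_t size)
{
        /* true when nmemb * size would exceed INT_MAX */
        return nmemb > (size_t)INT_MAX / size;
}

static bool range_out_of_bounds(unsigned long start, unsigned long count,
                                unsigned long limit)
{
        /* start + count may wrap around; the second test catches that case */
        return (start + count > limit) || (start + count < start);
}

int main(void)
{
        printf("%d %d\n",
               mul_would_overflow((size_t)INT_MAX, sizeof(void *)),  /* 1 */
               range_out_of_bounds(~0UL, 2, 100));                   /* 1: sum wraps */
        return 0;
}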
*
* This function allocates a new struct iucv_tty_buffer element and, optionally,
* allocates an internal data buffer with the specified size @size.
- * The internal data buffer is always allocated with GFP_DMA which is
- * required for receiving and sending data with IUCV.
* Note: The total message size arises from the internal buffer size and the
* members of the iucv_tty_msg structure.
* The function returns NULL if memory allocation has failed.
if (size > 0) {
bufp->msg.length = MSG_SIZE(size);
- bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA);
+ bufp->mbuf = kmalloc(bufp->msg.length, flags);
if (!bufp->mbuf) {
mempool_free(bufp, hvc_iucv_mempool);
return NULL;
if (!rb->mbuf) { /* message not yet received ... */
/* allocate mem to store msg data; if no memory is available
* then leave the buffer on the list and re-try later */
- rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
+ rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC);
if (!rb->mbuf)
return -ENOMEM;
int eax = regs->eax;
#if defined(CONFIG_X86_64)
- asm volatile("pushq %%rax\n\t"
+ asm("pushq %%rax\n\t"
"movl 0(%%rax),%%edx\n\t"
"pushq %%rdx\n\t"
"movl 4(%%rax),%%ebx\n\t"
: "a"(regs)
: "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
#else
- asm volatile("pushl %%eax\n\t"
+ asm("pushl %%eax\n\t"
"movl 0(%%eax),%%edx\n\t"
"push %%edx\n\t"
"movl 4(%%eax),%%ebx\n\t"
"movl %%edx,0(%%eax)\n\t"
"lahf\n\t"
"shrl $8,%%eax\n\t"
- "andl $1,%%eax\n"
- :"=a"(rc)
+ "andl $1,%%eax\n":"=a"(rc)
: "a"(regs)
: "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
#endif
tpm_protected_ordinal_duration[ordinal &
TPM_PROTECTED_ORDINAL_MASK];
- if (duration_idx != TPM_UNDEFINED) {
+ if (duration_idx != TPM_UNDEFINED)
duration = chip->vendor.duration[duration_idx];
- /* if duration is 0, it's because chip->vendor.duration wasn't */
- /* filled yet, so we set the lowest timeout just to give enough */
- /* time for tpm_get_timeouts() to succeed */
- return (duration <= 0 ? HZ : duration);
- } else
+ if (duration <= 0)
return 2 * 60 * HZ;
+ else
+ return duration;
}
EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
if (rc)
return;
- if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
- be32_to_cpu(tpm_cmd.header.out.length)
- != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
+ if (be32_to_cpu(tpm_cmd.header.out.return_code)
+ != 3 * sizeof(u32))
return;
-
duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
chip->vendor.duration[TPM_SHORT] =
usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
}
EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
-ssize_t tpm_show_timeouts(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
-
- return sprintf(buf, "%d %d %d\n",
- jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),
- jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
- jiffies_to_usecs(chip->vendor.duration[TPM_LONG]));
-}
-EXPORT_SYMBOL_GPL(tpm_show_timeouts);
-
ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
return -EBUSY;
}
- chip->data_buffer = kzalloc(TPM_BUFSIZE, GFP_KERNEL);
+ chip->data_buffer = kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL);
if (chip->data_buffer == NULL) {
clear_bit(0, &chip->is_open);
put_device(chip->dev);
char *);
extern ssize_t tpm_show_temp_deactivated(struct device *,
struct device_attribute *attr, char *);
-extern ssize_t tpm_show_timeouts(struct device *,
- struct device_attribute *attr, char *);
struct tpm_chip;
NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
-static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
static struct attribute *tis_attrs[] = {
&dev_attr_pubek.attr,
&dev_attr_owned.attr,
&dev_attr_temp_deactivated.attr,
&dev_attr_caps.attr,
- &dev_attr_cancel.attr,
- &dev_attr_timeouts.attr, NULL,
+ &dev_attr_cancel.attr, NULL,
};
static struct attribute_group tis_attr_grp = {
}
/* generate SMI */
- /* inb to force posted write through and make SMI happen now */
asm volatile (
- "outb %b0,%w1\n"
- "inb %w1"
+ "outb %b0,%w1"
: /* no output args */
: "a" (smi_cmd->command_code),
"d" (smi_cmd->command_address),
config DRM_I915
tristate "i915 driver"
depends on AGP_INTEL
- # we need shmfs for the swappable backing store, and in particular
- # the shmem_readpage() which depends upon tmpfs
select SHMEM
- select TMPFS
select DRM_KMS_HELPER
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
struct drm_file *file_priv)
{
struct drm_modeset_ctl *modeset = data;
- int ret = 0;
- unsigned int crtc;
+ int crtc, ret = 0;
/* If drm_vblank_init() hasn't been called yet, just no-op */
if (!dev->num_crtcs)
DMI_MATCH(DMI_BOARD_NAME, "i915GMx-F"),
},
},
- {
- .callback = intel_no_lvds_dmi_callback,
- .ident = "AOpen i915GMm-HFS",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
- DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
- },
- },
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Aopen i945GTt-VFA",
case ATOM_IIO_MOVE_INDEX:
temp &=
~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
- CU8(base + 3));
+ CU8(base + 2));
temp |=
((index >> CU8(base + 2)) &
(0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
case ATOM_IIO_MOVE_DATA:
temp &=
~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
- CU8(base + 3));
+ CU8(base + 2));
temp |=
((data >> CU8(base + 2)) &
(0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
case ATOM_IIO_MOVE_ATTR:
temp &=
~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
- CU8(base + 3));
+ CU8(base + 2));
temp |=
((ctx->
io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
return false;
}
- /* mac rv630, rv730, others */
- if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
- (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
- *connector_type = DRM_MODE_CONNECTOR_9PinDIN;
- *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
- }
-
/* ASUS HD 3600 XT board lists the DVI port as HDMI */
if ((dev->pdev->device == 0x9598) &&
(dev->pdev->subsystem_vendor == 0x1043) &&
bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
/* tell the bios not to handle mode switching */
- bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
+ bios_6_scratch |= (ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH | ATOM_S6_ACC_MODE);
if (rdev->family >= CHIP_R600) {
WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
else
bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
- if (lock) {
+ if (lock)
bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
- bios_6_scratch &= ~ATOM_S6_ACC_MODE;
- } else {
+ else
bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
- bios_6_scratch |= ATOM_S6_ACC_MODE;
- }
if (rdev->family >= CHIP_R600)
WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
*frac_fb_div_p = best_frac_feedback_div;
*ref_div_p = best_ref_div;
*post_div_p = best_post_div;
- DRM_DEBUG_KMS("%d %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
- freq, best_freq / 1000, best_feedback_div, best_frac_feedback_div,
- best_ref_div, best_post_div);
-
}
static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
u8 flags;
};
-static const struct apple_key_translation macbookair_fn_keys[] = {
- { KEY_BACKSPACE, KEY_DELETE },
- { KEY_ENTER, KEY_INSERT },
- { KEY_F1, KEY_BRIGHTNESSDOWN, APPLE_FLAG_FKEY },
- { KEY_F2, KEY_BRIGHTNESSUP, APPLE_FLAG_FKEY },
- { KEY_F3, KEY_SCALE, APPLE_FLAG_FKEY },
- { KEY_F4, KEY_DASHBOARD, APPLE_FLAG_FKEY },
- { KEY_F6, KEY_PREVIOUSSONG, APPLE_FLAG_FKEY },
- { KEY_F7, KEY_PLAYPAUSE, APPLE_FLAG_FKEY },
- { KEY_F8, KEY_NEXTSONG, APPLE_FLAG_FKEY },
- { KEY_F9, KEY_MUTE, APPLE_FLAG_FKEY },
- { KEY_F10, KEY_VOLUMEDOWN, APPLE_FLAG_FKEY },
- { KEY_F11, KEY_VOLUMEUP, APPLE_FLAG_FKEY },
- { KEY_F12, KEY_EJECTCD, APPLE_FLAG_FKEY },
- { KEY_UP, KEY_PAGEUP },
- { KEY_DOWN, KEY_PAGEDOWN },
- { KEY_LEFT, KEY_HOME },
- { KEY_RIGHT, KEY_END },
- { }
-};
-
static const struct apple_key_translation apple_fn_keys[] = {
{ KEY_BACKSPACE, KEY_DELETE },
{ KEY_ENTER, KEY_INSERT },
if (fnmode) {
int do_translate;
- if(hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&
- hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) {
- trans = apple_find_translation(macbookair_fn_keys, usage->code);
- } else if (hid->product < 0x21d || hid->product >= 0x300) {
- trans = apple_find_translation(powerbook_fn_keys, usage->code);
- } else {
- trans = apple_find_translation(apple_fn_keys, usage->code);
- }
-
+ trans = apple_find_translation((hid->product < 0x21d ||
+ hid->product >= 0x300) ?
+ powerbook_fn_keys : apple_fn_keys,
+ usage->code);
if (trans) {
if (test_bit(usage->code, asc->pressed_fn))
do_translate = 1;
.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI),
- .driver_data = APPLE_HAS_FN },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO),
- .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS),
- .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI),
- .driver_data = APPLE_HAS_FN },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO),
- .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS),
- .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
{ HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ }
.driver_data = CP_RDESC_SWAPPED_MIN_MAX },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2),
.driver_data = CP_RDESC_SWAPPED_MIN_MAX },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3),
- .driver_data = CP_RDESC_SWAPPED_MIN_MAX },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE),
.driver_data = CP_2WHEEL_MOUSE_HACK },
{ }
#define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236
#define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237
#define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238
-#define USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI 0x023f
-#define USB_DEVICE_ID_APPLE_WELLSPRING4_ISO 0x0240
-#define USB_DEVICE_ID_APPLE_WELLSPRING4_JIS 0x0241
-#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI 0x0242
-#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO 0x0243
-#define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS 0x0244
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
#define USB_DEVICE_ID_CYPRESS_ULTRAMOUSE 0x7417
#define USB_DEVICE_ID_CYPRESS_BARCODE_1 0xde61
#define USB_DEVICE_ID_CYPRESS_BARCODE_2 0xde64
-#define USB_DEVICE_ID_CYPRESS_BARCODE_3 0xbca1
#define USB_VENDOR_ID_DEALEXTREAME 0x10c5
#define USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701 0x819a
"TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P", "TM9S",
"TN0C", "TN0D", "TN0H", "TS0C", "Tp0C", "Tp1C", "Tv0S", "Tv1S",
NULL },
-/* Set 17: iMac 9,1 */
- { "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TH0P", "TL0P",
- "TN0D", "TN0H", "TN0P", "TO0P", "Tm0P", "Tp0P", NULL },
-/* Set 18: MacBook Pro 2,2 */
- { "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "TM0P", "TTF0",
- "Th0H", "Th1H", "Tm0P", "Ts0P", NULL },
-/* Set 19: Macbook Pro 5,3 */
- { "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0F", "TC0P", "TG0D",
- "TG0F", "TG0H", "TG0P", "TG0T", "TN0D", "TN0P", "TTF0", "Th2H",
- "Tm0P", "Ts0P", "Ts0S", NULL },
-/* Set 20: MacBook Pro 5,4 */
- { "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0F", "TC0P", "TN0D",
- "TN0P", "TTF0", "Th2H", "Ts0P", "Ts0S", NULL },
-/* Set 21: MacBook Pro 6,2 */
- { "TB0T", "TB1T", "TB2T", "TC0C", "TC0D", "TC0P", "TC1C", "TG0D",
- "TG0P", "TG0T", "TMCD", "TP0P", "TPCD", "Th1H", "Th2H", "Tm0P",
- "Ts0P", "Ts0S", NULL },
-/* Set 22: MacBook Pro 7,1 */
- { "TB0T", "TB1T", "TB2T", "TC0D", "TC0P", "TN0D", "TN0P", "TN0S",
- "TN1D", "TN1F", "TN1G", "TN1S", "Th1H", "Ts0P", "Ts0S", NULL },
-/* Set 23: MacBook Air 3,1 */
- { "TB0T", "TB1T", "TB2T", "TC0D", "TC0E", "TC0P", "TC1E", "TCZ3",
- "TCZ4", "TCZ5", "TG0E", "TG1E", "TG2E", "TGZ3", "TGZ4", "TGZ5",
- "TH0F", "TH0O", "TM0P" },
};
/* List of keys used to read/write fan speeds */
{ .accelerometer = 1, .light = 1, .temperature_set = 15 },
/* MacPro3,1: temperature set 16 */
{ .accelerometer = 0, .light = 0, .temperature_set = 16 },
-/* iMac 9,1: light sensor only, temperature set 17 */
- { .accelerometer = 0, .light = 0, .temperature_set = 17 },
-/* MacBook Pro 2,2: accelerometer, backlight and temperature set 18 */
- { .accelerometer = 1, .light = 1, .temperature_set = 18 },
-/* MacBook Pro 5,3: accelerometer, backlight and temperature set 19 */
- { .accelerometer = 1, .light = 1, .temperature_set = 19 },
-/* MacBook Pro 5,4: accelerometer, backlight and temperature set 20 */
- { .accelerometer = 1, .light = 1, .temperature_set = 20 },
-/* MacBook Pro 6,2: accelerometer, backlight and temperature set 21 */
- { .accelerometer = 1, .light = 1, .temperature_set = 21 },
-/* MacBook Pro 7,1: accelerometer, backlight and temperature set 22 */
- { .accelerometer = 1, .light = 1, .temperature_set = 22 },
-/* MacBook Air 3,1: accelerometer, backlight and temperature set 23 */
- { .accelerometer = 0, .light = 0, .temperature_set = 23 },
};
/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
* So we need to put "Apple MacBook Pro" before "Apple MacBook". */
static __initdata struct dmi_system_id applesmc_whitelist[] = {
- { applesmc_dmi_match, "Apple MacBook Air 3", {
- DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3") },
- &applesmc_dmi_data[23]},
{ applesmc_dmi_match, "Apple MacBook Air 2", {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir2") },
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir") },
&applesmc_dmi_data[7]},
- { applesmc_dmi_match, "Apple MacBook Pro 7", {
- DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro7") },
- &applesmc_dmi_data[22]},
- { applesmc_dmi_match, "Apple MacBook Pro 5,4", {
- DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,4") },
- &applesmc_dmi_data[20]},
- { applesmc_dmi_match, "Apple MacBook Pro 5,3", {
- DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,3") },
- &applesmc_dmi_data[19]},
- { applesmc_dmi_match, "Apple MacBook Pro 6", {
- DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6") },
- &applesmc_dmi_data[21]},
{ applesmc_dmi_match, "Apple MacBook Pro 5", {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5") },
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3") },
&applesmc_dmi_data[9]},
- { applesmc_dmi_match, "Apple MacBook Pro 2,2", {
- DMI_MATCH(DMI_BOARD_VENDOR, "Apple Computer, Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,2") },
- &applesmc_dmi_data[18]},
{ applesmc_dmi_match, "Apple MacBook Pro", {
DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro") },
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") },
&applesmc_dmi_data[4]},
- { applesmc_dmi_match, "Apple iMac 9,1", {
- DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1") },
- &applesmc_dmi_data[17]},
{ applesmc_dmi_match, "Apple iMac 8", {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "iMac8") },
int i;
struct f71882fg_data *data = platform_get_drvdata(pdev);
+ platform_set_drvdata(pdev, NULL);
if (data->hwmon_dev)
hwmon_device_unregister(data->hwmon_dev);
for (i = 0; i < ARRAY_SIZE(f8000_fan_attr); i++)
device_remove_file(&pdev->dev, &f8000_fan_attr[i].dev_attr);
- platform_set_drvdata(pdev, NULL);
kfree(data);
return 0;
const int c1 = -4;
const int c2 = 40500; /* x 10 ^ -6 */
- const int c3 = -28; /* x 10 ^ -7 */
+ const int c3 = -2800; /* x10 ^ -9 */
RHlinear = c1*1000
+ c2 * data->val_humid/1000
- + (data->val_humid * data->val_humid * c3) / 10000;
+ + (data->val_humid * data->val_humid * c3)/1000000;
return (temp - 25000) * (10000 + 80 * data->val_humid)
/ 1000000 + RHlinear;
}
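For reference, the fixed-point arithmetic in the hunk above appears to implement the usual two-step relative-humidity linearisation, with the constants the hunk encodes: c1 = -4, c2 = 0.0405, c3 = -2.8e-6, and temperature-compensation terms 0.01 and 0.00008. A floating-point sketch of the same formula, using made-up sample readings:

#include <stdio.h>

int main(void)
{
        double so_rh = 1800.0;   /* example raw humidity reading */
        double t_c   = 30.0;     /* example temperature in degrees C */

        /* RH_linear = c1 + c2*SO_RH + c3*SO_RH^2, then compensate for T */
        double rh_linear = -4.0 + 0.0405 * so_rh - 2.8e-6 * so_rh * so_rh;
        double rh_true   = (t_c - 25.0) * (0.01 + 0.00008 * so_rh) + rh_linear;

        printf("RH_linear = %.2f %%, RH_true = %.2f %%\n", rh_linear, rh_true);
        return 0;
}

The driver works in milli-units throughout, which is why the hunk multiplies c1 by 1000 and scales the other terms by powers of ten instead of using floating point.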
return 0;
}
-static void via686a_update_fan_div(struct via686a_data *data)
-{
- int reg = via686a_read_value(data, VIA686A_REG_FANDIV);
- data->fan_div[0] = (reg >> 4) & 0x03;
- data->fan_div[1] = reg >> 6;
-}
-
static void __devinit via686a_init_device(struct via686a_data *data)
{
u8 reg;
via686a_write_value(data, VIA686A_REG_TEMP_MODE,
(reg & ~VIA686A_TEMP_MODE_MASK)
| VIA686A_TEMP_MODE_CONTINUOUS);
-
- /* Pre-read fan clock divisor values */
- via686a_update_fan_div(data);
}
static struct via686a_data *via686a_update_device(struct device *dev)
(via686a_read_value(data, VIA686A_REG_TEMP_LOW23) &
0xc0) >> 6;
- via686a_update_fan_div(data);
+ i = via686a_read_value(data, VIA686A_REG_FANDIV);
+ data->fan_div[0] = (i >> 4) & 0x03;
+ data->fan_div[1] = i >> 6;
data->alarms =
via686a_read_value(data,
VIA686A_REG_ALARM1) |
}
static int __unregister_client(struct device *dev, void *dummy)
-{
- struct i2c_client *client = i2c_verify_client(dev);
- if (client && strcmp(client->name, "dummy"))
- i2c_unregister_device(client);
- return 0;
-}
-
-static int __unregister_dummy(struct device *dev, void *dummy)
{
struct i2c_client *client = i2c_verify_client(dev);
if (client)
}
/* Detach any active clients. This can't fail, thus we do not
- * check the returned value. This is a two-pass process, because
- * we can't remove the dummy devices during the first pass: they
- * could have been instantiated by real devices wishing to clean
- * them up properly, so we give them a chance to do that first. */
+ check the returned value. */
res = device_for_each_child(&adap->dev, NULL, __unregister_client);
- res = device_for_each_child(&adap->dev, NULL, __unregister_dummy);
#ifdef CONFIG_I2C_COMPAT
class_compat_remove_link(i2c_adapter_compat_class, &adap->dev,
goto out; /* No match. */
}
atomic_inc(&cur_cm_id_priv->refcount);
- atomic_inc(&cm_id_priv->refcount);
spin_unlock_irq(&cm.lock);
cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
cm_id->context = conn_id;
cm_id->cm_handler = cma_ib_handler;
- /*
- * Protect against the user destroying conn_id from another thread
- * until we're done accessing it.
- */
- atomic_inc(&conn_id->refcount);
ret = conn_id->id.event_handler(&conn_id->id, &event);
if (!ret) {
/*
ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
mutex_unlock(&lock);
mutex_unlock(&conn_id->handler_mutex);
- cma_deref_id(conn_id);
goto out;
}
- cma_deref_id(conn_id);
/* Destroy the CM ID by returning a non-zero value. */
conn_id->cm_id.ib = NULL;
event.param.conn.private_data_len = iw_event->private_data_len;
event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
event.param.conn.responder_resources = attr.max_qp_rd_atom;
-
- /*
- * Protect against the user destroying conn_id from another thread
- * until we're done accessing it.
- */
- atomic_inc(&conn_id->refcount);
ret = conn_id->id.event_handler(&conn_id->id, &event);
if (ret) {
/* User wants to destroy the CM ID */
conn_id->cm_id.iw = NULL;
cma_exch(conn_id, CMA_DESTROYING);
mutex_unlock(&conn_id->handler_mutex);
- cma_deref_id(conn_id);
rdma_destroy_id(&conn_id->id);
goto out;
}
mutex_unlock(&conn_id->handler_mutex);
- cma_deref_id(conn_id);
out:
if (dev)
#define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236
#define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237
#define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238
-/* MacbookAir3,2 (unibody), aka wellspring5 */
-#define USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI 0x023f
-#define USB_DEVICE_ID_APPLE_WELLSPRING4_ISO 0x0240
-#define USB_DEVICE_ID_APPLE_WELLSPRING4_JIS 0x0241
-/* MacbookAir3,1 (unibody), aka wellspring4 */
-#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI 0x0242
-#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO 0x0243
-#define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS 0x0244
#define BCM5974_DEVICE(prod) { \
.match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ISO),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_JIS),
- /* MacbookAir3,2 */
- BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI),
- BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_ISO),
- BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_JIS),
- /* MacbookAir3,1 */
- BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI),
- BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO),
- BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS),
/* Terminating entry */
{}
};
{ DIM_X, DIM_X / SN_COORD, -4460, 5166 },
{ DIM_Y, DIM_Y / SN_COORD, -75, 6700 }
},
- {
- USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI,
- USB_DEVICE_ID_APPLE_WELLSPRING4_ISO,
- USB_DEVICE_ID_APPLE_WELLSPRING4_JIS,
- HAS_INTEGRATED_BUTTON,
- 0x84, sizeof(struct bt_data),
- 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
- { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
- { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
- { DIM_X, DIM_X / SN_COORD, -4620, 5140 },
- { DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
- },
- {
- USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI,
- USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO,
- USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS,
- HAS_INTEGRATED_BUTTON,
- 0x84, sizeof(struct bt_data),
- 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
- { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
- { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
- { DIM_X, DIM_X / SN_COORD, -4616, 5112 },
- { DIM_Y, DIM_Y / SN_COORD, -142, 5234 }
- },
{}
};
printk(KERN_DEBUG format, ##arg); \
} while (0)
-static bool force_elantech;
-module_param_named(force_elantech, force_elantech, bool, 0644);
-MODULE_PARM_DESC(force_elantech, "Force the Elantech PS/2 protocol extension to be used, 1 = enabled, 0 = disabled (default).");
-
/*
* Send a Synaptics style sliced query command
*/
struct elantech_data *etd = psmouse->private;
unsigned char *packet = psmouse->packet;
int fingers;
+ static int old_fingers;
- if (etd->fw_version < 0x020000) {
- /*
- * byte 0: D U p1 p2 1 p3 R L
- * byte 1: f 0 th tw x9 x8 y9 y8
- */
+ if (etd->fw_version_maj == 0x01) {
+ /* byte 0: D U p1 p2 1 p3 R L
+ byte 1: f 0 th tw x9 x8 y9 y8 */
fingers = ((packet[1] & 0x80) >> 7) +
((packet[1] & 0x30) >> 4);
} else {
- /*
- * byte 0: n1 n0 p2 p1 1 p3 R L
- * byte 1: 0 0 0 0 x9 x8 y9 y8
- */
+ /* byte 0: n1 n0 p2 p1 1 p3 R L
+ byte 1: 0 0 0 0 x9 x8 y9 y8 */
fingers = (packet[0] & 0xc0) >> 6;
}
if (etd->jumpy_cursor) {
- if (fingers != 1) {
- etd->single_finger_reports = 0;
- } else if (etd->single_finger_reports < 2) {
- /* Discard first 2 reports of one finger, bogus */
- etd->single_finger_reports++;
+ /* Discard packets that are likely to have bogus coordinates */
+ if (fingers > old_fingers) {
elantech_debug("elantech.c: discarding packet\n");
- return;
+ goto discard_packet_v1;
}
}
input_report_key(dev, BTN_TOUCH, fingers != 0);
- /*
- * byte 2: x7 x6 x5 x4 x3 x2 x1 x0
- * byte 3: y7 y6 y5 y4 y3 y2 y1 y0
- */
+ /* byte 2: x7 x6 x5 x4 x3 x2 x1 x0
+ byte 3: y7 y6 y5 y4 y3 y2 y1 y0 */
if (fingers) {
input_report_abs(dev, ABS_X,
((packet[1] & 0x0c) << 6) | packet[2]);
- input_report_abs(dev, ABS_Y,
- ETP_YMAX_V1 - (((packet[1] & 0x03) << 8) | packet[3]));
+ input_report_abs(dev, ABS_Y, ETP_YMAX_V1 -
+ (((packet[1] & 0x03) << 8) | packet[3]));
}
input_report_key(dev, BTN_TOOL_FINGER, fingers == 1);
input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
- if (etd->fw_version < 0x020000 &&
+ if ((etd->fw_version_maj == 0x01) &&
(etd->capabilities & ETP_CAP_HAS_ROCKER)) {
/* rocker up */
input_report_key(dev, BTN_FORWARD, packet[0] & 0x40);
}
input_sync(dev);
+
+ discard_packet_v1:
+ old_fingers = fingers;
}
/*
switch (fingers) {
case 1:
- /*
- * byte 1: . . . . . x10 x9 x8
- * byte 2: x7 x6 x5 x4 x4 x2 x1 x0
- */
- input_report_abs(dev, ABS_X,
- ((packet[1] & 0x07) << 8) | packet[2]);
- /*
- * byte 4: . . . . . . y9 y8
- * byte 5: y7 y6 y5 y4 y3 y2 y1 y0
- */
- input_report_abs(dev, ABS_Y,
- ETP_YMAX_V2 - (((packet[4] & 0x03) << 8) | packet[5]));
+ /* byte 1: x15 x14 x13 x12 x11 x10 x9 x8
+ byte 2: x7 x6 x5 x4 x4 x2 x1 x0 */
+ input_report_abs(dev, ABS_X, (packet[1] << 8) | packet[2]);
+ /* byte 4: y15 y14 y13 y12 y11 y10 y9 y8
+ byte 5: y7 y6 y5 y4 y3 y2 y1 y0 */
+ input_report_abs(dev, ABS_Y, ETP_YMAX_V2 -
+ ((packet[4] << 8) | packet[5]));
break;
case 2:
- /*
- * The coordinate of each finger is reported separately
- * with a lower resolution for two finger touches:
- * byte 0: . . ay8 ax8 . . . .
- * byte 1: ax7 ax6 ax5 ax4 ax3 ax2 ax1 ax0
- */
+ /* The coordinate of each finger is reported separately with
+ a lower resolution for two finger touches */
+ /* byte 0: . . ay8 ax8 . . . .
+ byte 1: ax7 ax6 ax5 ax4 ax3 ax2 ax1 ax0 */
x1 = ((packet[0] & 0x10) << 4) | packet[1];
/* byte 2: ay7 ay6 ay5 ay4 ay3 ay2 ay1 ay0 */
y1 = ETP_2FT_YMAX - (((packet[0] & 0x20) << 3) | packet[2]);
- /*
- * byte 3: . . by8 bx8 . . . .
- * byte 4: bx7 bx6 bx5 bx4 bx3 bx2 bx1 bx0
- */
+ /* byte 3: . . by8 bx8 . . . .
+ byte 4: bx7 bx6 bx5 bx4 bx3 bx2 bx1 bx0 */
x2 = ((packet[3] & 0x10) << 4) | packet[4];
/* byte 5: by7 by8 by5 by4 by3 by2 by1 by0 */
y2 = ETP_2FT_YMAX - (((packet[3] & 0x20) << 3) | packet[5]);
- /*
- * For compatibility with the X Synaptics driver scale up
- * one coordinate and report as ordinary mouse movent
- */
+ /* For compatibility with the X Synaptics driver scale up one
+ coordinate and report as ordinary mouse movent */
input_report_abs(dev, ABS_X, x1 << 2);
input_report_abs(dev, ABS_Y, y1 << 2);
- /*
- * For compatibility with the proprietary X Elantech driver
- * report both coordinates as hat coordinates
- */
+ /* For compatibility with the proprietary X Elantech driver
+ report both coordinates as hat coordinates */
input_report_abs(dev, ABS_HAT0X, x1);
input_report_abs(dev, ABS_HAT0Y, y1);
input_report_abs(dev, ABS_HAT1X, x2);
unsigned char p1, p2, p3;
/* Parity bits are placed differently */
- if (etd->fw_version < 0x020000) {
+ if (etd->fw_version_maj == 0x01) {
/* byte 0: D U p1 p2 1 p3 R L */
p1 = (packet[0] & 0x20) >> 5;
p2 = (packet[0] & 0x10) >> 4;
__set_bit(EV_KEY, dev->evbit);
__set_bit(EV_ABS, dev->evbit);
- __clear_bit(EV_REL, dev->evbit);
__set_bit(BTN_LEFT, dev->keybit);
__set_bit(BTN_RIGHT, dev->keybit);
switch (etd->hw_version) {
case 1:
/* Rocker button */
- if (etd->fw_version < 0x020000 &&
+ if ((etd->fw_version_maj == 0x01) &&
(etd->capabilities & ETP_CAP_HAS_ROCKER)) {
__set_bit(BTN_FORWARD, dev->keybit);
__set_bit(BTN_BACK, dev->keybit);
.attrs = elantech_attrs,
};
-static bool elantech_is_signature_valid(const unsigned char *param)
-{
- static const unsigned char rates[] = { 200, 100, 80, 60, 40, 20, 10 };
- int i;
-
- if (param[0] == 0)
- return false;
-
- if (param[1] == 0)
- return true;
-
- for (i = 0; i < ARRAY_SIZE(rates); i++)
- if (param[2] == rates[i])
- return false;
-
- return true;
-}
-
/*
* Use magic knock to detect Elantech touchpad
*/
pr_debug("elantech.c: Elantech version query result 0x%02x, 0x%02x, 0x%02x.\n",
param[0], param[1], param[2]);
- if (!elantech_is_signature_valid(param)) {
- if (!force_elantech) {
- pr_debug("elantech.c: Probably not a real Elantech touchpad. Aborting.\n");
- return -1;
- }
-
- pr_debug("elantech.c: Probably not a real Elantech touchpad. Enabling anyway due to force_elantech.\n");
+ if (param[0] == 0 || param[1] != 0) {
+ pr_debug("elantech.c: Probably not a real Elantech touchpad. Aborting.\n");
+ return -1;
}
if (set_properties) {
pr_err("elantech.c: failed to query firmware version.\n");
goto init_fail;
}
-
- etd->fw_version = (param[0] << 16) | (param[1] << 8) | param[2];
+ etd->fw_version_maj = param[0];
+ etd->fw_version_min = param[2];
/*
* Assume every version greater than this is new EeePC style
* hardware with 6 byte packets
*/
- if (etd->fw_version >= 0x020030) {
+ if (etd->fw_version_maj >= 0x02 && etd->fw_version_min >= 0x30) {
etd->hw_version = 2;
/* For now show extra debug information */
etd->debug = 1;
etd->hw_version = 1;
etd->paritycheck = 1;
}
-
- pr_info("elantech.c: assuming hardware version %d, firmware version %d.%d.%d\n",
- etd->hw_version, param[0], param[1], param[2]);
+ pr_info("elantech.c: assuming hardware version %d, firmware version %d.%d\n",
+ etd->hw_version, etd->fw_version_maj, etd->fw_version_min);
if (synaptics_send_cmd(psmouse, ETP_CAPABILITIES_QUERY, param)) {
pr_err("elantech.c: failed to query capabilities.\n");
etd->capabilities = param[0];
/*
- * This firmware suffers from misreporting coordinates when
+ * This firmware seems to suffer from misreporting coordinates when
* a touch action starts causing the mouse cursor or scrolled page
* to jump. Enable a workaround.
*/
- if (etd->fw_version == 0x020022 || etd->fw_version == 0x020600) {
- pr_info("elantech.c: firmware version 2.0.34/2.6.0 detected, "
+ if (etd->fw_version_maj == 0x02 && etd->fw_version_min == 0x22) {
+ pr_info("elantech.c: firmware version 2.34 detected, "
"enabling jumpy cursor workaround\n");
- etd->jumpy_cursor = true;
+ etd->jumpy_cursor = 1;
}
if (elantech_set_absolute_mode(psmouse)) {
unsigned char reg_26;
unsigned char debug;
unsigned char capabilities;
- bool paritycheck;
- bool jumpy_cursor;
+ unsigned char fw_version_maj;
+ unsigned char fw_version_min;
unsigned char hw_version;
- unsigned int fw_version;
- unsigned int single_finger_reports;
+ unsigned char paritycheck;
+ unsigned char jumpy_cursor;
unsigned char parity[256];
};
if (synaptics_send_cmd(psmouse, SYN_QUE_CAPABILITIES, cap))
return -1;
priv->capabilities = (cap[0] << 16) | (cap[1] << 8) | cap[2];
- priv->ext_cap = priv->ext_cap_0c = 0;
-
+ priv->ext_cap = 0;
if (!SYN_CAP_VALID(priv->capabilities))
return -1;
if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 1) {
if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_CAPAB, cap)) {
printk(KERN_ERR "Synaptics claims to have extended capabilities,"
- " but I'm not able to read them.\n");
+ " but I'm not able to read them.");
} else {
priv->ext_cap = (cap[0] << 16) | (cap[1] << 8) | cap[2];
priv->ext_cap &= 0xff0fff;
}
}
-
- if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 4) {
- if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_CAPAB_0C, cap)) {
- printk(KERN_ERR "Synaptics claims to have extended capability 0x0c,"
- " but I'm not able to read it.\n");
- } else {
- priv->ext_cap_0c = (cap[0] << 16) | (cap[1] << 8) | cap[2];
- }
- }
-
return 0;
}
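The query hunks above pack the three response bytes into a single capability word; the header hunk further down defines the bit masks tested against it (for example SYN_CAP_CLICKPAD as 0x100100). A self-contained sketch of that pack-and-test pattern, with a made-up query response:

#include <stdint.h>
#include <stdio.h>

#define SYN_CAP_CLICKPAD_MASK 0x100100UL   /* mirrors the header hunk below */

int main(void)
{
        uint8_t cap[3] = { 0x10, 0x01, 0x00 };   /* example response bytes */
        unsigned long ext_cap_0c = ((unsigned long)cap[0] << 16) |
                                   ((unsigned long)cap[1] << 8) | cap[2];

        if (ext_cap_0c & SYN_CAP_CLICKPAD_MASK)
                printf("clickpad detected\n");
        return 0;
}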
hw->left = (buf[0] & 0x01) ? 1 : 0;
hw->right = (buf[0] & 0x02) ? 1 : 0;
- if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
- /*
- * Clickpad's button is transmitted as middle button,
- * however, since it is primary button, we will report
- * it as BTN_LEFT.
- */
- hw->left = ((buf[0] ^ buf[3]) & 0x01) ? 1 : 0;
-
- } else if (SYN_CAP_MIDDLE_BUTTON(priv->capabilities)) {
+ if (SYN_CAP_MIDDLE_BUTTON(priv->capabilities)) {
hw->middle = ((buf[0] ^ buf[3]) & 0x01) ? 1 : 0;
if (hw->w == 2)
hw->scroll = (signed char)(buf[1]);
dev->absres[ABS_X] = priv->x_res;
dev->absres[ABS_Y] = priv->y_res;
-
- if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
- /* Clickpads report only left button */
- __clear_bit(BTN_RIGHT, dev->keybit);
- __clear_bit(BTN_MIDDLE, dev->keybit);
- }
}
static void synaptics_disconnect(struct psmouse *psmouse)
priv->pkt_type = SYN_MODEL_NEWABS(priv->model_id) ? SYN_NEWABS : SYN_OLDABS;
- printk(KERN_INFO "Synaptics Touchpad, model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx\n",
+ printk(KERN_INFO "Synaptics Touchpad, model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx\n",
SYN_ID_MODEL(priv->identity),
SYN_ID_MAJOR(priv->identity), SYN_ID_MINOR(priv->identity),
- priv->model_id, priv->capabilities, priv->ext_cap, priv->ext_cap_0c);
+ priv->model_id, priv->capabilities, priv->ext_cap);
set_input_params(psmouse->dev, priv);
#define SYN_QUE_SERIAL_NUMBER_SUFFIX 0x07
#define SYN_QUE_RESOLUTION 0x08
#define SYN_QUE_EXT_CAPAB 0x09
-#define SYN_QUE_EXT_CAPAB_0C 0x0c
/* synatics modes */
#define SYN_BIT_ABSOLUTE_MODE (1 << 7)
#define SYN_CAP_VALID(c) ((((c) & 0x00ff00) >> 8) == 0x47)
#define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20)
#define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12)
-#define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16)
-#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100100)
/* synaptics modes query bits */
#define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7))
unsigned long int model_id; /* Model-ID */
unsigned long int capabilities; /* Capabilities */
unsigned long int ext_cap; /* Extended Capabilities */
- unsigned long int ext_cap_0c; /* Ext Caps from 0x0c query */
unsigned long int identity; /* Identification */
int x_res; /* X resolution in units/mm */
int y_res; /* Y resolution in units/mm */
DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
},
},
- {
- /* Dell Vostro V13 */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
- },
- },
{ }
};
};
#endif
-static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
- {
- /* Dell Vostro V13 */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
- },
- },
- { }
-};
-
/*
* Some Wistron based laptops need us to explicitly enable the 'Dritek
* keyboard extension' to make their extra keys start generating scancodes.
if (dmi_check_system(i8042_dmi_nomux_table))
i8042_nomux = true;
- if (dmi_check_system(i8042_dmi_notimeout_table))
- i8042_notimeout = true;
-
if (dmi_check_system(i8042_dmi_dritek_table))
i8042_dritek = true;
#endif /* CONFIG_X86 */
module_param_named(panicblink, i8042_blink_frequency, uint, 0600);
MODULE_PARM_DESC(panicblink, "Frequency with which keyboard LEDs should blink when kernel panics");
-static bool i8042_notimeout;
-module_param_named(notimeout, i8042_notimeout, bool, 0);
-MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042");
-
#ifdef CONFIG_X86
static bool i8042_dritek;
module_param_named(dritek, i8042_dritek, bool, 0);
} else {
dfl = ((str & I8042_STR_PARITY) ? SERIO_PARITY : 0) |
- ((str & I8042_STR_TIMEOUT && !i8042_notimeout) ? SERIO_TIMEOUT : 0);
+ ((str & I8042_STR_TIMEOUT) ? SERIO_TIMEOUT : 0);
port_no = (str & I8042_STR_AUXDATA) ?
I8042_AUX_PORT_NO : I8042_KBD_PORT_NO;
static int __devinit xenkbd_probe(struct xenbus_device *dev,
const struct xenbus_device_id *id)
{
- int ret, i, abs;
+ int ret, i;
struct xenkbd_info *info;
struct input_dev *kbd, *ptr;
if (!info->page)
goto error_nomem;
- if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-abs-pointer", "%d", &abs) < 0)
- abs = 0;
- if (abs)
- xenbus_printf(XBT_NIL, dev->nodename, "request-abs-pointer", "1");
-
/* keyboard */
kbd = input_allocate_device();
if (!kbd)
kbd->id.bustype = BUS_PCI;
kbd->id.vendor = 0x5853;
kbd->id.product = 0xffff;
-
- __set_bit(EV_KEY, kbd->evbit);
+ kbd->evbit[0] = BIT(EV_KEY);
for (i = KEY_ESC; i < KEY_UNKNOWN; i++)
- __set_bit(i, kbd->keybit);
+ set_bit(i, kbd->keybit);
for (i = KEY_OK; i < KEY_MAX; i++)
- __set_bit(i, kbd->keybit);
+ set_bit(i, kbd->keybit);
ret = input_register_device(kbd);
if (ret) {
ptr->id.bustype = BUS_PCI;
ptr->id.vendor = 0x5853;
ptr->id.product = 0xfffe;
-
- if (abs) {
- __set_bit(EV_ABS, ptr->evbit);
- input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0);
- input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0);
- } else {
- input_set_capability(ptr, EV_REL, REL_X);
- input_set_capability(ptr, EV_REL, REL_Y);
- }
- input_set_capability(ptr, EV_REL, REL_WHEEL);
-
- __set_bit(EV_KEY, ptr->evbit);
+ ptr->evbit[0] = BIT(EV_KEY) | BIT(EV_REL) | BIT(EV_ABS);
for (i = BTN_LEFT; i <= BTN_TASK; i++)
- __set_bit(i, ptr->keybit);
+ set_bit(i, ptr->keybit);
+ ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y) | BIT(REL_WHEEL);
+ input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0);
+ input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0);
ret = input_register_device(ptr);
if (ret) {
ret = xenbus_printf(XBT_NIL, info->xbdev->nodename,
"request-abs-pointer", "1");
if (ret)
- pr_warning("can't request abs-pointer\n");
+ printk(KERN_WARNING
+ "xenkbd: can't request abs-pointer");
}
xenbus_switch_state(dev, XenbusStateConnected);
break;
ll_unload(csta);
}
-static irqreturn_t card_irq(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- irqreturn_t ret = cs->irq_func(intno, cs);
-
- if (ret == IRQ_HANDLED)
- cs->irq_cnt++;
- return ret;
-}
-
static int init_card(struct IsdnCardState *cs)
{
int irq_cnt, cnt = 3, ret;
ret = cs->cardmsg(cs, CARD_INIT, NULL);
return(ret);
}
- irq_cnt = cs->irq_cnt = 0;
+ irq_cnt = kstat_irqs(cs->irq);
printk(KERN_INFO "%s: IRQ %d count %d\n", CardType[cs->typ],
cs->irq, irq_cnt);
- if (request_irq(cs->irq, card_irq, cs->irq_flags, "HiSax", cs)) {
+ if (request_irq(cs->irq, cs->irq_func, cs->irq_flags, "HiSax", cs)) {
printk(KERN_WARNING "HiSax: couldn't get interrupt %d\n",
cs->irq);
return 1;
/* Timeout 10ms */
msleep(10);
printk(KERN_INFO "%s: IRQ %d count %d\n",
- CardType[cs->typ], cs->irq, cs->irq_cnt);
- if (cs->irq_cnt == irq_cnt) {
+ CardType[cs->typ], cs->irq, kstat_irqs(cs->irq));
+ if (kstat_irqs(cs->irq) == irq_cnt) {
printk(KERN_WARNING
"%s: IRQ(%d) getting no interrupts during init %d\n",
CardType[cs->typ], cs->irq, 4 - cnt);
u_long event;
struct work_struct tqueue;
struct timer_list dbusytimer;
- unsigned int irq_cnt;
#ifdef ERROR_STATISTIC
int err_crc;
int err_tx;
if ((info->flags & ISDN_ASYNC_CLOSING) || (!info->tty)) {
return;
}
+#ifdef CONFIG_ISDN_AUDIO
+ if ( !info->vonline )
+ tty_ldisc_flush(info->tty);
+#else
+ tty_ldisc_flush(info->tty);
+#endif
if ((info->flags & ISDN_ASYNC_CHECK_CD) &&
(!((info->flags & ISDN_ASYNC_CALLOUT_ACTIVE) &&
(info->flags & ISDN_ASYNC_CALLOUT_NOHUP)))) {
unsigned fail_count; /* Cumulative failure count */
struct dm_path path;
+ struct work_struct deactivate_path;
struct work_struct activate_path;
};
static void process_queued_ios(struct work_struct *work);
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
+static void deactivate_path(struct work_struct *work);
/*-----------------------------------------------
if (pgpath) {
pgpath->is_active = 1;
+ INIT_WORK(&pgpath->deactivate_path, deactivate_path);
INIT_WORK(&pgpath->activate_path, activate_path);
}
kfree(pgpath);
}
+static void deactivate_path(struct work_struct *work)
+{
+ struct pgpath *pgpath =
+ container_of(work, struct pgpath, deactivate_path);
+
+ blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
+}
+
static struct priority_group *alloc_priority_group(void)
{
struct priority_group *pg;
pgpath->path.dev->name, m->nr_valid_paths);
schedule_work(&m->trigger_event);
+ queue_work(kmultipathd, &pgpath->deactivate_path);
out:
spin_unlock_irqrestore(&m->lock, flags);
/*
* Dispatch io.
*/
- if (unlikely(ms->log_failure) && errors_handled(ms)) {
+ if (unlikely(ms->log_failure)) {
spin_lock_irq(&ms->lock);
bio_list_merge(&ms->failures, &sync);
spin_unlock_irq(&ms->lock);
spin_lock_irq(&rh->region_lock);
if (success)
list_add(®->list, ®->rh->recovered_regions);
- else
+ else {
+ reg->state = DM_RH_NOSYNC;
list_add(®->list, ®->rh->failed_recovered_regions);
-
+ }
spin_unlock_irq(&rh->region_lock);
rh->wakeup_workers(rh->context);
wake_up(&md->eventq);
}
-/*
- * Protected by md->suspend_lock obtained by dm_swap_table().
- */
static void __set_size(struct mapped_device *md, sector_t size)
{
set_capacity(md->disk, size);
+ mutex_lock(&md->bdev->bd_inode->i_mutex);
i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
+ mutex_unlock(&md->bdev->bd_inode->i_mutex);
}
static int __bind(struct mapped_device *md, struct dm_table *t,
{
mddev_t *mddev, *new = NULL;
- if (unit && MAJOR(unit) != MD_MAJOR)
- unit &= ~((1<<MdpMinorShift)-1);
-
retry:
spin_lock(&all_mddevs_lock);
/* set saved_raid_disk if appropriate */
if (!mddev->persistent) {
if (info->state & (1<<MD_DISK_SYNC) &&
- info->raid_disk < mddev->raid_disks) {
+ info->raid_disk < mddev->raid_disks)
rdev->raid_disk = info->raid_disk;
- set_bit(In_sync, &rdev->flags);
- } else
+ else
rdev->raid_disk = -1;
} else
super_types[mddev->major_version].
DEBSTATUS);
#define DRIVER_VERSION "0.1"
-#define DRIVER_NAME "flexcop-pci"
+#define DRIVER_NAME "Technisat/B2C2 FlexCop II/IIb/III Digital TV PCI Driver"
#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@desy.de>"
struct flexcop_pci {
{ 0x1d37, KEY_RECORD },
{ 0x1d3b, KEY_GOTO },
{ 0x1d3d, KEY_POWER },
-
- /* Key codes for the Elgato EyeTV Diversity silver remote,
- set dvb_usb_dib0700_ir_proto=0 */
- { 0x4501, KEY_POWER },
- { 0x4502, KEY_MUTE },
- { 0x4503, KEY_1 },
- { 0x4504, KEY_2 },
- { 0x4505, KEY_3 },
- { 0x4506, KEY_4 },
- { 0x4507, KEY_5 },
- { 0x4508, KEY_6 },
- { 0x4509, KEY_7 },
- { 0x450a, KEY_8 },
- { 0x450b, KEY_9 },
- { 0x450c, KEY_LAST },
- { 0x450d, KEY_0 },
- { 0x450e, KEY_ENTER },
- { 0x450f, KEY_RED },
- { 0x4510, KEY_CHANNELUP },
- { 0x4511, KEY_GREEN },
- { 0x4512, KEY_VOLUMEDOWN },
- { 0x4513, KEY_OK },
- { 0x4514, KEY_VOLUMEUP },
- { 0x4515, KEY_YELLOW },
- { 0x4516, KEY_CHANNELDOWN },
- { 0x4517, KEY_BLUE },
- { 0x4518, KEY_LEFT }, /* Skip backwards */
- { 0x4519, KEY_PLAYPAUSE },
- { 0x451a, KEY_RIGHT }, /* Skip forward */
- { 0x451b, KEY_REWIND },
- { 0x451c, KEY_L }, /* Live */
- { 0x451d, KEY_FASTFORWARD },
- { 0x451e, KEY_STOP }, /* 'Reveal' for Teletext */
- { 0x451f, KEY_MENU }, /* KEY_TEXT for Teletext */
- { 0x4540, KEY_RECORD }, /* Font 'Size' for Teletext */
- { 0x4541, KEY_SCREEN }, /* Full screen toggle, 'Hold' for Teletext */
- { 0x4542, KEY_SELECT }, /* Select video input, 'Select' for Teletext */
};
/* STK7700P: Hauppauge Nova-T Stick, AVerMedia Volar */
{ USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK807XPVR) },
{ USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK807XP) },
{ USB_DEVICE(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD) },
- { USB_DEVICE(USB_VID_ELGATO, USB_PID_ELGATO_EYETV_DIVERSITY) },
{ 0 } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
}
},
- .num_device_descs = 7,
+ .num_device_descs = 6,
.devices = {
{ "DiBcom STK7070PD reference design",
{ &dib0700_usb_id_table[17], NULL },
{ "Sony PlayTV",
{ &dib0700_usb_id_table[44], NULL },
{ NULL },
- },
- { "Elgato EyeTV Diversity",
- { &dib0700_usb_id_table[64], NULL },
- { NULL },
- },
+ }
},
.rc_interval = DEFAULT_RC_INTERVAL,
.rc_key_map = dib0700_rc_keys,
#define USB_PID_TELESTAR_STARSTICK_2 0x8000
#define USB_PID_MSI_DIGI_VOX_MINI_III 0x8807
#define USB_PID_SONY_PLAYTV 0x0003
-#define USB_PID_ELGATO_EYETV_DIVERSITY 0x0011
#define USB_PID_ELGATO_EYETV_DTT 0x0021
#define USB_PID_ELGATO_EYETV_DTT_Dlx 0x0020
#define USB_PID_DVB_T_USB_STICK_HIGH_SPEED_COLD 0x5000
{
ca_slot_info_t *info=(ca_slot_info_t *)parg;
- if (info->num < 0 || info->num > 1)
+ if (info->num > 1)
return -EINVAL;
av7110->ci_slot[info->num].num = info->num;
av7110->ci_slot[info->num].type = FW_CI_LL_SUPPORT(av7110->arm_app) ?
#include <linux/module.h> /* Modules */
#include <linux/init.h> /* Initdata */
#include <linux/ioport.h> /* request_region */
-#include <linux/delay.h> /* msleep */
+#include <linux/delay.h> /* udelay */
#include <linux/videodev2.h> /* kernel radio structs */
#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/io.h> /* outb, outb_p */
/* local things */
+static void sleep_delay(long n)
+{
+ /* Sleep nicely for 'n' uS */
+ int d = n / msecs_to_jiffies(1000);
+ if (!d)
+ udelay(n);
+ else
+ msleep(jiffies_to_msecs(d));
+}
+
static void rt_decvol(struct rtrack *rt)
{
outb(0x58, rt->io); /* volume down + sigstr + on */
- msleep(100);
+ sleep_delay(100000);
outb(0xd8, rt->io); /* volume steady + sigstr + on */
}
static void rt_incvol(struct rtrack *rt)
{
outb(0x98, rt->io); /* volume up + sigstr + on */
- msleep(100);
+ sleep_delay(100000);
outb(0xd8, rt->io); /* volume steady + sigstr + on */
}
if (vol == 0) { /* volume = 0 means mute the card */
outb(0x48, rt->io); /* volume down but still "on" */
- msleep(2000); /* make sure it's totally down */
+ sleep_delay(2000000); /* make sure it's totally down */
outb(0xd0, rt->io); /* volume steady, off */
rt->curvol = 0; /* track the volume state! */
mutex_unlock(&rt->lock);
outb_p(128+64+16+8+ 1, rt->io); /* on + wr-enable + data low */
outb_p(128+64+16+8+2+1, rt->io); /* clock */
}
- msleep(1);
+ sleep_delay(1000);
}
static void send_1_byte(struct rtrack *rt)
outb_p(128+64+16+8+4+2+1, rt->io); /* clock */
}
- msleep(1);
+ sleep_delay(1000);
}
static int rt_setfreq(struct rtrack *rt, unsigned long freq)
/* this ensures that the volume is all the way down */
outb(0x48, rt->io); /* volume down but still "on" */
- msleep(2000); /* make sure it's totally down */
+ sleep_delay(2000000); /* make sure it's totally down */
outb(0xc0, rt->io); /* steady volume, mute card */
return 0;
.input = { {
.type = EM28XX_VMUX_COMPOSITE1,
.vmux = SAA7115_COMPOSITE0,
- .amux = EM28XX_AMUX_LINE_IN,
+ .amux = EM28XX_AMUX_VIDEO2,
}, {
.type = EM28XX_VMUX_SVIDEO,
.vmux = SAA7115_SVIDEO3,
- .amux = EM28XX_AMUX_LINE_IN,
+ .amux = EM28XX_AMUX_VIDEO2,
} },
},
[EM2860_BOARD_TERRATEC_AV350] = {
sn9c102_show_i2c_reg, sn9c102_store_i2c_reg);
static DEVICE_ATTR(i2c_val, S_IRUGO | S_IWUSR,
sn9c102_show_i2c_val, sn9c102_store_i2c_val);
-static DEVICE_ATTR(green, S_IWUSR, NULL, sn9c102_store_green);
-static DEVICE_ATTR(blue, S_IWUSR, NULL, sn9c102_store_blue);
-static DEVICE_ATTR(red, S_IWUSR, NULL, sn9c102_store_red);
+static DEVICE_ATTR(green, S_IWUGO, NULL, sn9c102_store_green);
+static DEVICE_ATTR(blue, S_IWUGO, NULL, sn9c102_store_blue);
+static DEVICE_ATTR(red, S_IWUGO, NULL, sn9c102_store_red);
static DEVICE_ATTR(frame_header, S_IRUGO, sn9c102_show_frame_header, NULL);
static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
struct uvc_streaming_control *ctrl)
{
- struct uvc_format *format = NULL;
+ struct uvc_format *format;
struct uvc_frame *frame = NULL;
unsigned int i;
- for (i = 0; i < stream->nformats; ++i) {
- if (stream->format[i].index == ctrl->bFormatIndex) {
- format = &stream->format[i];
- break;
- }
- }
-
- if (format == NULL)
+ if (ctrl->bFormatIndex <= 0 ||
+ ctrl->bFormatIndex > stream->nformats)
return;
+ format = &stream->format[ctrl->bFormatIndex - 1];
+
for (i = 0; i < format->nframes; ++i) {
if (format->frame[i].bFrameIndex == ctrl->bFrameIndex) {
frame = &format->frame[i];
return 1;
}
-static int
-mptctl_release(struct inode *inode, struct file *filep)
-{
- fasync_helper(-1, filep, 0, &async_queue);
- return 0;
-}
-
static int
mptctl_fasync(int fd, struct file *filep, int mode)
{
.llseek = no_llseek,
.fasync = mptctl_fasync,
.unlocked_ioctl = mptctl_ioctl,
- .release = mptctl_release,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_mpctl_ioctl,
#endif
}
out:
- printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p) (sn=%ld)\n",
- ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), retval,
- SCpnt, SCpnt->serial_number);
+ printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n",
+ ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt);
return retval;
}
vdevice = SCpnt->device->hostdata;
if (!vdevice || !vdevice->vtarget) {
- retval = 0;
+ retval = SUCCESS;
goto out;
}
ab3100_get_priv.ab3100 = ab3100;
ab3100_get_priv.mode = false;
ab3100_get_reg_file = debugfs_create_file("get_reg",
- S_IWUSR, ab3100_dir, &ab3100_get_priv,
+ S_IWUGO, ab3100_dir, &ab3100_get_priv,
&ab3100_get_set_reg_fops);
if (!ab3100_get_reg_file) {
err = -ENOMEM;
ab3100_set_priv.ab3100 = ab3100;
ab3100_set_priv.mode = true;
ab3100_set_reg_file = debugfs_create_file("set_reg",
- S_IWUSR, ab3100_dir, &ab3100_set_priv,
+ S_IWUGO, ab3100_dir, &ab3100_set_priv,
&ab3100_get_set_reg_fops);
if (!ab3100_set_reg_file) {
err = -ENOMEM;
idev->close = ucb1x00_ts_close;
__set_bit(EV_ABS, idev->evbit);
+ __set_bit(ABS_X, idev->absbit);
+ __set_bit(ABS_Y, idev->absbit);
+ __set_bit(ABS_PRESSURE, idev->absbit);
input_set_drvdata(idev, ts);
- ucb1x00_adc_enable(ts->ucb);
- ts->x_res = ucb1x00_ts_read_xres(ts);
- ts->y_res = ucb1x00_ts_read_yres(ts);
- ucb1x00_adc_disable(ts->ucb);
-
- input_set_abs_params(idev, ABS_X, 0, ts->x_res, 0, 0);
- input_set_abs_params(idev, ABS_Y, 0, ts->y_res, 0, 0);
- input_set_abs_params(idev, ABS_PRESSURE, 0, 0, 0, 0);
-
err = input_register_device(idev);
if (err)
goto fail;
static DEVICE_ATTR(min_freq, S_IRUGO, ep93xx_pwm_get_min_freq, NULL);
static DEVICE_ATTR(max_freq, S_IRUGO, ep93xx_pwm_get_max_freq, NULL);
-static DEVICE_ATTR(freq, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(freq, S_IWUGO | S_IRUGO,
ep93xx_pwm_get_freq, ep93xx_pwm_set_freq);
-static DEVICE_ATTR(duty_percent, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(duty_percent, S_IWUGO | S_IRUGO,
ep93xx_pwm_get_duty_percent, ep93xx_pwm_set_duty_percent);
-static DEVICE_ATTR(invert, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(invert, S_IWUGO | S_IRUGO,
ep93xx_pwm_get_invert, ep93xx_pwm_set_invert);
static struct attribute *ep93xx_pwm_attrs[] = {
host->ioaddr = pci_ioremap_bar(pdev, bar);
if (!host->ioaddr) {
dev_err(&pdev->dev, "failed to remap registers\n");
- ret = -ENOMEM;
goto release;
}
host = (struct sdhci_host*)param;
- /*
- * If this tasklet gets rescheduled while running, it will
- * be run again afterwards but without any active request.
- */
- if (!host->mrq)
- return;
-
spin_lock_irqsave(&host->lock, flags);
del_timer(&host->timer);
* upon error conditions.
*/
if (!(host->flags & SDHCI_DEVICE_DEAD) &&
- ((mrq->cmd && mrq->cmd->error) ||
+ (mrq->cmd->error ||
(mrq->data && (mrq->data->error ||
(mrq->data->stop && mrq->data->stop->error))) ||
(host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
module_init(omap_nand_init);
module_exit(omap_nand_exit);
-MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_ALIAS(DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");
.remove = __devexit_p(generic_onenand_remove),
};
-MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_ALIAS(DRIVER_NAME);
static int __init generic_onenand_init(void)
{
module_init(omap2_onenand_init);
module_exit(omap2_onenand_exit);
-MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_ALIAS(DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");
obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
obj-$(CONFIG_B44) += b44.o
obj-$(CONFIG_FORCEDETH) += forcedeth.o
-obj-$(CONFIG_NE_H8300) += ne-h8300.o
+obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
obj-$(CONFIG_AX88796) += ax88796.o
obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
obj-$(CONFIG_LP486E) += lp486e.o
obj-$(CONFIG_ETH16I) += eth16i.o
-obj-$(CONFIG_ZORRO8390) += zorro8390.o
+obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o
obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
obj-$(CONFIG_EQUALIZER) += eql.o
obj-$(CONFIG_DECLANCE) += declance.o
obj-$(CONFIG_ATARILANCE) += atarilance.o
obj-$(CONFIG_A2065) += a2065.o
-obj-$(CONFIG_HYDRA) += hydra.o
+obj-$(CONFIG_HYDRA) += hydra.o 8390.o
obj-$(CONFIG_ARIADNE) += ariadne.o
obj-$(CONFIG_CS89x0) += cs89x0.o
obj-$(CONFIG_MACSONIC) += macsonic.o
else {
use_tpd = atl1c_get_tpd(adapter, type);
memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
+ use_tpd = atl1c_get_tpd(adapter, type);
+ memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
}
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
buffer_info->length = buf_len - mapped_len;
ulong be_tx_jiffies;
u64 be_tx_bytes;
u64 be_tx_bytes_prev;
- u64 be_tx_pkts;
u32 be_tx_rate;
u32 cache_barrier[16];
ulong be_rx_jiffies;
u64 be_rx_bytes;
u64 be_rx_bytes_prev;
- u64 be_rx_pkts;
u32 be_rx_rate;
/* number of non ether type II frames dropped where
* frame len > length field of Mac Hdr */
struct net_device_stats *dev_stats = &adapter->stats.net_stats;
struct be_erx_stats *erx_stats = &hw_stats->erx;
- dev_stats->rx_packets = drvr_stats(adapter)->be_rx_pkts;
- dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
- dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
- dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
+ dev_stats->rx_packets = port_stats->rx_total_frames;
+ dev_stats->tx_packets = port_stats->tx_unicastframes +
+ port_stats->tx_multicastframes + port_stats->tx_broadcastframes;
+ dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 |
+ (u64) port_stats->rx_bytes_lsd;
+ dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
+ (u64) port_stats->tx_bytes_lsd;
/* bad pkts received */
dev_stats->rx_errors = port_stats->rx_crc_errors +
}
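The stats hunk above reassembles a 64-bit byte counter that the adapter exposes as two 32-bit halves (msd/lsd). A minimal sketch of that combination, with arbitrary sample values:

#include <stdint.h>
#include <stdio.h>

static uint64_t combine_counter(uint32_t msd, uint32_t lsd)
{
        /* shift the most-significant half into place and OR in the rest */
        return ((uint64_t)msd << 32) | lsd;
}

int main(void)
{
        /* prints 4294967298, i.e. 0x100000002 */
        printf("%llu\n", (unsigned long long)combine_counter(0x1, 0x2));
        return 0;
}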
static void be_tx_stats_update(struct be_adapter *adapter,
- u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
+ u32 wrb_cnt, u32 copied, bool stopped)
{
struct be_drvr_stats *stats = drvr_stats(adapter);
stats->be_tx_reqs++;
stats->be_tx_wrbs += wrb_cnt;
stats->be_tx_bytes += copied;
- stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
if (stopped)
stats->be_tx_stops++;
}
be_txq_notify(adapter, txq->id, wrb_cnt);
- be_tx_stats_update(adapter, wrb_cnt, copied,
- skb_shinfo(skb)->gso_segs, stopped);
+ be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
} else {
txq->head = start;
dev_kfree_skb_any(skb);
stats->be_rx_compl++;
stats->be_rx_frags += numfrags;
stats->be_rx_bytes += pktsize;
- stats->be_rx_pkts++;
}
static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
-#include <linux/stringify.h>
#include "bnx2x.h"
#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
-#define FW_FILE_VERSION \
- __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
- __stringify(BCM_5710_FW_MINOR_VERSION) "." \
- __stringify(BCM_5710_FW_REVISION_VERSION) "." \
- __stringify(BCM_5710_FW_ENGINEERING_VERSION)
-#define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
-#define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
+#define FW_FILE_PREFIX_E1 "bnx2x-e1-"
+#define FW_FILE_PREFIX_E1H "bnx2x-e1h-"
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT (5*HZ)
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
-MODULE_FIRMWARE(FW_FILE_NAME_E1);
-MODULE_FIRMWARE(FW_FILE_NAME_E1H);
static int multi_mode = 1;
module_param(multi_mode, int, 0);
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
- const char *fw_file_name;
+ char fw_file_name[40] = {0};
struct bnx2x_fw_file_hdr *fw_hdr;
- int rc;
+ int rc, offset;
+ /* Create a FW file name */
if (CHIP_IS_E1(bp))
- fw_file_name = FW_FILE_NAME_E1;
+ offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
else
- fw_file_name = FW_FILE_NAME_E1H;
+ offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
+
+ sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
+ BCM_5710_FW_MAJOR_VERSION,
+ BCM_5710_FW_MINOR_VERSION,
+ BCM_5710_FW_REVISION_VERSION,
+ BCM_5710_FW_ENGINEERING_VERSION);
printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
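/*
 * Aside -- an illustrative userspace sketch of the two ways of building
 * the firmware file name seen in this hunk: compile-time stringification
 * (as the removed FW_FILE_NAME_E1 macros did) versus runtime snprintf()
 * (as the replacement code does).  FW_MAJOR and friends are placeholder
 * values here, not the real BCM_5710_FW_* numbers.
 */
#include <stdio.h>

#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

#define FW_MAJOR	5
#define FW_MINOR	2
#define FW_REV		13
#define FW_ENG		0

#define FW_NAME_STATIC	"bnx2x-e1-" __stringify(FW_MAJOR) "." \
			__stringify(FW_MINOR) "." __stringify(FW_REV) "." \
			__stringify(FW_ENG) ".fw"

int main(void)
{
	char buf[40];

	/* Assembled entirely by the preprocessor (usable in MODULE_FIRMWARE). */
	puts(FW_NAME_STATIC);

	/* Assembled at run time, as the replacement code does. */
	snprintf(buf, sizeof(buf), "bnx2x-e1-%d.%d.%d.%d.fw",
		 FW_MAJOR, FW_MINOR, FW_REV, FW_ENG);
	puts(buf);
	return 0;
}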
if (!(dev->flags & IFF_MASTER))
goto out;
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (!skb)
- goto out;
-
if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
goto out;
goto out;
}
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (!skb)
- goto out;
-
if (!pskb_may_pull(skb, arp_hdr_len(bond_dev)))
goto out;
};
struct sk_buff *skb;
- /* The Ethernet header is built in ndisc_send_skb(), not
- * ndisc_build_skb(), so we cannot insert a VLAN tag. Only an
- * out-of-line tag inserted by the hardware will work.
- */
- if (vlan_id && !(slave_dev->features & NETIF_F_HW_VLAN_TX))
- return;
-
icmp6h.icmp6_router = router;
icmp6h.icmp6_solicited = 0;
icmp6h.icmp6_override = 1;
}
if (vlan_id) {
- skb = __vlan_hwaccel_put_tag(skb, vlan_id);
+ skb = vlan_put_tag(skb, vlan_id);
if (!skb) {
pr_err(DRV_NAME ": failed to insert VLAN tag\n");
return;
if (!slave || !slave_do_arp_validate(bond, slave))
goto out_unlock;
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (!skb)
- goto out_unlock;
-
if (!pskb_may_pull(skb, arp_hdr_len(dev)))
goto out_unlock;
IRQ_NUM_STATS /* keep last */
};
-#define TP_VERSION_MAJOR 1
-#define TP_VERSION_MINOR 1
-#define TP_VERSION_MICRO 0
+enum {
+ TP_VERSION_MAJOR = 1,
+ TP_VERSION_MINOR = 1,
+ TP_VERSION_MICRO = 0
+};
#define S_TP_VERSION_MAJOR 16
#define M_TP_VERSION_MAJOR 0xFF
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
-#include <linux/stringify.h>
#include <asm/uaccess.h>
#include "common.h"
return err;
}
-#define FW_VERSION __stringify(FW_VERSION_MAJOR) "." \
- __stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
-#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
-#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "." \
- __stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
-#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
+#define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
+#define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"
#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
-MODULE_FIRMWARE(FW_FNAME);
-MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
-MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
-MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
-MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
-MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);
static inline const char *get_edc_fw_name(int edc_idx)
{
static int upgrade_fw(struct adapter *adap)
{
int ret;
+ char buf[64];
const struct firmware *fw;
struct device *dev = &adap->pdev->dev;
- ret = request_firmware(&fw, FW_FNAME, dev);
+ snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
+ FW_VERSION_MINOR, FW_VERSION_MICRO);
+ ret = request_firmware(&fw, buf, dev);
if (ret < 0) {
dev_err(dev, "could not upgrade firmware: unable to load %s\n",
- FW_FNAME);
+ buf);
return ret;
}
ret = t3_load_fw(adap, fw->data, fw->size);
if (!rev)
return 0;
- snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);
+ snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
+ TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
ret = request_firmware(&tpsram, buf, dev);
if (ret < 0) {
#endif
};
-/**
- * e1000_init_hw_struct - initialize members of hw struct
- * @adapter: board private struct
- * @hw: structure used by e1000_hw.c
- *
- * Factors out initialization of the e1000_hw struct to its own function
- * that can be called very early at init (just after struct allocation).
- * Fields are initialized based on PCI device information and
- * OS network device settings (MTU size).
- * Returns negative error codes if MAC type setup fails.
- */
-static int e1000_init_hw_struct(struct e1000_adapter *adapter,
- struct e1000_hw *hw)
-{
- struct pci_dev *pdev = adapter->pdev;
-
- /* PCI config space info */
- hw->vendor_id = pdev->vendor;
- hw->device_id = pdev->device;
- hw->subsystem_vendor_id = pdev->subsystem_vendor;
- hw->subsystem_id = pdev->subsystem_device;
- hw->revision_id = pdev->revision;
-
- pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
-
- hw->max_frame_size = adapter->netdev->mtu +
- ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
- hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
-
- /* identify the MAC */
- if (e1000_set_mac_type(hw)) {
- DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
- return -EIO;
- }
-
- switch (hw->mac_type) {
- default:
- break;
- case e1000_82541:
- case e1000_82547:
- case e1000_82541_rev_2:
- case e1000_82547_rev_2:
- hw->phy_init_script = 1;
- break;
- }
-
- e1000_set_media_type(hw);
- e1000_get_bus_info(hw);
-
- hw->wait_autoneg_complete = false;
- hw->tbi_compatibility_en = true;
- hw->adaptive_ifs = true;
-
- /* Copper options */
-
- if (hw->media_type == e1000_media_type_copper) {
- hw->mdix = AUTO_ALL_MODES;
- hw->disable_polarity_correction = false;
- hw->master_slave = E1000_MASTER_SLAVE;
- }
-
- return 0;
-}
-
/**
* e1000_probe - Device Initialization Routine
* @pdev: PCI device information struct
if (err)
return err;
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ pci_using_dac = 1;
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ E1000_ERR("No usable DMA configuration, "
+ "aborting\n");
+ goto err_dma;
+ }
+ }
+ pci_using_dac = 0;
+ }
+
err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
if (err)
goto err_pci_reg;
}
}
- /* make ready for any if (hw->...) below */
- err = e1000_init_hw_struct(adapter, hw);
- if (err)
- goto err_sw_init;
-
- /*
- * there is a workaround being applied below that limits
- * 64-bit DMA addresses to 64-bit hardware. There are some
- * 32-bit adapters that Tx hang when given 64-bit DMA addresses
- */
- pci_using_dac = 0;
- if ((hw->bus_type == e1000_bus_type_pcix) &&
- !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- /*
- * according to DMA-API-HOWTO, coherent calls will always
- * succeed if the set call did
- */
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- pci_using_dac = 1;
- } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- } else {
- E1000_ERR("No usable DMA configuration, aborting\n");
- goto err_dma;
- }
-
netdev->netdev_ops = &e1000_netdev_ops;
e1000_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
if (!is_valid_ether_addr(netdev->perm_addr))
DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
+ e1000_get_bus_info(hw);
+
init_timer(&adapter->tx_fifo_stall_timer);
adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
adapter->tx_fifo_stall_timer.data = (unsigned long)adapter;
iounmap(hw->flash_address);
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
-err_dma:
err_sw_init:
iounmap(hw->hw_addr);
err_ioremap:
err_alloc_etherdev:
pci_release_selected_regions(pdev, bars);
err_pci_reg:
+err_dma:
pci_disable_device(pdev);
return err;
}
* @adapter: board private structure to initialize
*
* e1000_sw_init initializes the Adapter private data structure.
- * e1000_init_hw_struct MUST be called before this function
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
**/
static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+
+ /* PCI config space info */
+
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_id = pdev->subsystem_device;
+ hw->revision_id = pdev->revision;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
+
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+ hw->max_frame_size = netdev->mtu +
+ ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+ hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
+
+ /* identify the MAC */
+
+ if (e1000_set_mac_type(hw)) {
+ DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
+ return -EIO;
+ }
+
+ switch (hw->mac_type) {
+ default:
+ break;
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ hw->phy_init_script = 1;
+ break;
+ }
+
+ e1000_set_media_type(hw);
+
+ hw->wait_autoneg_complete = false;
+ hw->tbi_compatibility_en = true;
+ hw->adaptive_ifs = true;
+
+ /* Copper options */
+
+ if (hw->media_type == e1000_media_type_copper) {
+ hw->mdix = AUTO_ALL_MODES;
+ hw->disable_polarity_correction = false;
+ hw->master_slave = E1000_MASTER_SLAVE;
+ }
adapter->num_tx_queues = 1;
adapter->num_rx_queues = 1;
phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked;
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
- /*
- * Reset the PHY before any acccess to it. Doing so, ensures that
- * the PHY is in a known good state before we read/write PHY registers.
- * The generic reset is sufficient here, because we haven't determined
- * the PHY type yet.
- */
- ret_val = e1000e_phy_hw_reset_generic(hw);
- if (ret_val)
- goto out;
-
phy->id = e1000_phy_unknown;
e1000e_get_phy_id(hw);
phy->type = e1000e_get_phy_type_from_id(phy->id);
phy->ops.commit_phy = e1000e_phy_sw_reset;
}
- out:
return ret_val;
}
/* APME bit in EEPROM is mapped to WUC.APME */
eeprom_data = er32(WUC);
eeprom_apme_mask = E1000_WUC_APME;
- if ((hw->mac.type > e1000_ich10lan) &&
- (eeprom_data & E1000_WUC_PHY_WAKE))
+ if (eeprom_data & E1000_WUC_PHY_WAKE)
adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
cmd->duplex = -1;
}
- if (cmd->speed == SPEED_10000) {
- cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
- cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
- cmd->port = PORT_FIBRE;
- } else {
- cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full
- | SUPPORTED_100baseT_Half | SUPPORTED_10baseT_Full
- | SUPPORTED_10baseT_Half | SUPPORTED_Autoneg
- | SUPPORTED_TP);
- cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg
- | ADVERTISED_TP);
- cmd->port = PORT_TP;
- }
+ cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full
+ | SUPPORTED_100baseT_Full | SUPPORTED_100baseT_Half
+ | SUPPORTED_10baseT_Full | SUPPORTED_10baseT_Half
+ | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
+
+ cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Autoneg
+ | ADVERTISED_FIBRE);
+ cmd->port = PORT_FIBRE;
cmd->autoneg = port->autoneg == 1 ? AUTONEG_ENABLE : AUTONEG_DISABLE;
return 0;
.ndo_open = hydra_open,
.ndo_stop = hydra_close,
- .ndo_start_xmit = __ei_start_xmit,
- .ndo_tx_timeout = __ei_tx_timeout,
- .ndo_get_stats = __ei_get_stats,
- .ndo_set_multicast_list = __ei_set_multicast_list,
+ .ndo_start_xmit = ei_start_xmit,
+ .ndo_tx_timeout = ei_tx_timeout,
+ .ndo_get_stats = ei_get_stats,
+ .ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = __ei_poll,
+ .ndo_poll_controller = ei_poll,
#endif
};
0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
};
- dev = ____alloc_ei_netdev(0);
+ dev = alloc_ei_netdev();
if (!dev)
return -ENOMEM;
unsigned int page_offset;
};
};
+ struct page *page;
};
union igbvf_desc {
"Sundance Technology ST2021 based NIC",
"Tamarack Microelectronics TC9020/9021 based NIC",
"Tamarack Microelectronics TC9020/9021 based NIC",
+ "D-Link NIC",
"D-Link NIC IP1000A"
};
{ PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
{ PCI_VDEVICE(SUNDANCE, 0x1021), 2 },
{ PCI_VDEVICE(DLINK, 0x9021), 3 },
- { PCI_VDEVICE(DLINK, 0x4020), 4 },
+ { PCI_VDEVICE(DLINK, 0x4000), 4 },
+ { PCI_VDEVICE(DLINK, 0x4020), 5 },
{ 0, }
};
media_type = ixgbe_media_type_backplane;
break;
case IXGBE_DEV_ID_82599_SFP:
- case IXGBE_DEV_ID_82599_SFP_EM:
media_type = ixgbe_media_type_fiber;
break;
case IXGBE_DEV_ID_82599_CX4:
board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
board_82599 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
- board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
break;
(*work_done)++;
- rmb(); /* read descriptor and rx_buffer_info after status DD */
if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
/* Decide whether to use packet split mode or not */
adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
- /* Disable packet split due to 82599 erratum #45 */
- if (hw->mac.type == ixgbe_mac_82599EB)
- adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
-
/* Set the RX buffer length according to the mode */
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
rx_buf_len = IXGBE_RX_HDR_SIZE;
#define IXGBE_DEV_ID_82599_KR 0x1517
#define IXGBE_DEV_ID_82599_CX4 0x10F9
#define IXGBE_DEV_ID_82599_SFP 0x10FB
-#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat";
static char *myri10ge_fw_rss_unaligned = "myri10ge_rss_ethp_z8e.dat";
static char *myri10ge_fw_rss_aligned = "myri10ge_rss_eth_z8e.dat";
-MODULE_FIRMWARE("myri10ge_ethp_z8e.dat");
-MODULE_FIRMWARE("myri10ge_eth_z8e.dat");
-MODULE_FIRMWARE("myri10ge_rss_ethp_z8e.dat");
-MODULE_FIRMWARE("myri10ge_rss_eth_z8e.dat");
static char *myri10ge_fw_name = NULL;
module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
dma_free_coherent(&pdev->dev, bytes,
ss->fw_stats, ss->fw_stats_bus);
ss->fw_stats = NULL;
- netif_napi_del(&ss->napi);
}
}
kfree(mgp->ss);
#ifndef MODULE
struct net_device * __init ne_probe(int unit)
{
- struct net_device *dev = ____alloc_ei_netdev(0);
+ struct net_device *dev = alloc_ei_netdev();
int err;
if (!dev)
.ndo_open = ne_open,
.ndo_stop = ne_close,
- .ndo_start_xmit = __ei_start_xmit,
- .ndo_tx_timeout = __ei_tx_timeout,
- .ndo_get_stats = __ei_get_stats,
- .ndo_set_multicast_list = __ei_set_multicast_list,
+ .ndo_start_xmit = ei_start_xmit,
+ .ndo_tx_timeout = ei_tx_timeout,
+ .ndo_get_stats = ei_get_stats,
+ .ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = __ei_poll,
+ .ndo_poll_controller = ei_poll,
#endif
};
int err;
for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
- struct net_device *dev = ____alloc_ei_netdev(0);
+ struct net_device *dev = alloc_ei_netdev();
if (!dev)
break;
if (io[this_dev]) {
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" CARDNAME);
-MODULE_FIRMWARE("xc0.bin");
-MODULE_FIRMWARE("xc1.bin");
-MODULE_FIRMWARE("xc2.bin");
#define NX_P3_MN_ROMIMAGE 2
#define NX_FLASH_ROMIMAGE 3
-#define NX_P2_MN_ROMIMAGE_NAME "nxromimg.bin"
-#define NX_P3_CT_ROMIMAGE_NAME "nx3fwct.bin"
-#define NX_P3_MN_ROMIMAGE_NAME "nx3fwmn.bin"
-#define NX_FLASH_ROMIMAGE_NAME "flash"
-
extern char netxen_nic_driver_name[];
/* Number of status descriptors to handle per interrupt */
struct list_head *head;
nx_mac_list_t *cur;
- if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
- return;
-
list_splice_tail_init(&adapter->mac_list, &del_list);
nx_p3_nic_add_mac(adapter, adapter->mac_addr, &del_list);
}
static char *fw_name[] = {
- NX_P2_MN_ROMIMAGE_NAME,
- NX_P3_CT_ROMIMAGE_NAME,
- NX_P3_MN_ROMIMAGE_NAME,
- NX_FLASH_ROMIMAGE_NAME,
+ "nxromimg.bin", "nx3fwct.bin", "nx3fwmn.bin", "flash",
};
int
MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
-MODULE_FIRMWARE(NX_P2_MN_ROMIMAGE_NAME);
-MODULE_FIRMWARE(NX_P3_CT_ROMIMAGE_NAME);
-MODULE_FIRMWARE(NX_P3_MN_ROMIMAGE_NAME);
char netxen_nic_driver_name[] = "netxen_nic";
static char netxen_nic_driver_string[] = "NetXen Network Driver version "
struct niu_parent *parent = np->parent;
struct niu_tcam_entry *tp;
int i, idx, cnt;
+ u16 n_entries;
unsigned long flags;
- int ret = 0;
+
/* put the tcam size here */
nfc->data = tcam_get_size(np);
niu_lock_parent(np, flags);
+ n_entries = nfc->rule_cnt;
for (cnt = 0, i = 0; i < nfc->data; i++) {
idx = tcam_get_index(np, i);
tp = &parent->tcam[idx];
if (!tp->valid)
continue;
- if (cnt == nfc->rule_cnt) {
- ret = -EMSGSIZE;
- break;
- }
rule_locs[cnt] = i;
cnt++;
}
niu_unlock_parent(np, flags);
- return ret;
+ if (n_entries != cnt) {
+ /* print warning, this should not happen */
+ pr_info(PFX "niu%d: %s In niu_get_ethtool_tcam_all, "
+ "n_entries[%d] != cnt[%d]!!!\n\n",
+ np->parent->index, np->dev->name, n_entries, cnt);
+ }
+
+ return 0;
}
static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, pcnet_ids);
-MODULE_FIRMWARE("cis/PCMLM28.cis");
-MODULE_FIRMWARE("cis/DP83903.cis");
-MODULE_FIRMWARE("cis/LA-PCM.cis");
-MODULE_FIRMWARE("PE520.cis");
-MODULE_FIRMWARE("cis/NE2K.cis");
-MODULE_FIRMWARE("cis/PE-200.cis");
-MODULE_FIRMWARE("cis/tamarack.cis");
static struct pcmcia_driver pcnet_driver = {
.drv = {
/* Try to dequeue as many skbs from reorder_q as we can. */
pppol2tp_recv_dequeue(session);
- sock_put(sock);
return 0;
UDP_INC_STATS_USER(&init_net, UDP_MIB_INERRORS, 0);
tunnel->stats.rx_errors++;
kfree_skb(skb);
- sock_put(sock);
return 0;
if (tunnel_sock == NULL)
goto end;
- sock_hold(tunnel_sock);
tunnel = tunnel_sock->sk_user_data;
} else {
tunnel = pppol2tp_tunnel_find(sock_net(sk), sp->pppol2tp.s_tunnel);
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
-#include <linux/pci-aspm.h>
#include <asm/system.h>
#include <asm/io.h>
mii->reg_num_mask = 0x1f;
mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
- /* disable ASPM completely as that cause random device stop working
- * problems as well as full system hangs for some PCIe devices users */
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
-
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pci_enable_device(pdev);
if (rc < 0) {
RTL_W16(IntrMitigate, 0x5151);
/* Work around for RxFIFO overflow. */
- if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
- tp->mac_version == RTL_GIGA_MAC_VER_22) {
+ if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
tp->intr_event |= RxFIFOOver | PCSTimeout;
tp->intr_event &= ~RxOverflow;
}
/* Work around for rx fifo overflow */
if (unlikely(status & RxFIFOOver) &&
- (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
- tp->mac_version == RTL_GIGA_MAC_VER_22)) {
+ (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
netif_stop_queue(dev);
rtl8169_tx_timeout(dev);
break;
/* Done. We have linked the TTY line to a channel. */
rtnl_unlock();
tty->receive_room = 65536; /* We don't flow control */
-
- /* TTY layer expects 0 on success */
- return 0;
+ return sl->dev->base_addr;
err_free_bufs:
sl_free_bufs(sl);
MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(VERSION);
-MODULE_FIRMWARE(SPIDER_NET_FIRMWARE_NAME);
static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;
ENTER;
master = READ_REG(priv, regINIT_SEMAPHORE);
if (!READ_REG(priv, regINIT_STATUS) && master) {
- rc = request_firmware(&fw, "tehuti/bdx.bin", &priv->pdev->dev);
+ rc = request_firmware(&fw, "tehuti/firmware.bin", &priv->pdev->dev);
if (rc)
goto out;
bdx_tx_push_desc_safe(priv, (char *)fw->data, fw->size);
MODULE_LICENSE("GPL");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(BDX_DRV_DESC);
-MODULE_FIRMWARE("tehuti/bdx.bin");
+MODULE_FIRMWARE("tehuti/firmware.bin");
return (-1);
}
-MODULE_FIRMWARE("tms380tr.bin");
-
/*
* Starts bring up diagnostics of token ring adapter and evaluates
* diagnostic results.
{ 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
{ 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
- { 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
{ 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ } /* terminate list */
};
}
}
-static void virtnet_napi_enable(struct virtnet_info *vi)
-{
- napi_enable(&vi->napi);
-
- /* If all buffers were filled by other side before we napi_enabled, we
- * won't get another interrupt, so process any outstanding packets
- * now. virtnet_poll wants re-enable the queue, so we disable here.
- * We synchronize against interrupts via NAPI_STATE_SCHED */
- if (napi_schedule_prep(&vi->napi)) {
- vi->rvq->vq_ops->disable_cb(vi->rvq);
- __napi_schedule(&vi->napi);
- }
-}
-
static void refill_work(struct work_struct *work)
{
struct virtnet_info *vi;
vi = container_of(work, struct virtnet_info, refill.work);
napi_disable(&vi->napi);
still_empty = !try_fill_recv(vi, GFP_KERNEL);
- virtnet_napi_enable(vi);
+ napi_enable(&vi->napi);
/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again. */
struct virtnet_info *vi = netdev_priv(dev);
int capacity;
+again:
/* Free up any pending old buffers before queueing new ones. */
free_old_xmit_skbs(vi);
/* This can happen with OOM and indirect buffers. */
if (unlikely(capacity < 0)) {
- if (net_ratelimit()) {
- if (likely(capacity == -ENOMEM)) {
- dev_warn(&dev->dev,
- "TX queue failure: out of memory\n");
- } else {
- dev->stats.tx_fifo_errors++;
- dev_warn(&dev->dev,
- "Unexpected TX queue failure: %d\n",
- capacity);
- }
+ netif_stop_queue(dev);
+ dev_warn(&dev->dev, "Unexpected full queue\n");
+ if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+ vi->svq->vq_ops->disable_cb(vi->svq);
+ netif_start_queue(dev);
+ goto again;
}
- dev->stats.tx_dropped++;
- kfree_skb(skb);
- return NETDEV_TX_OK;
+ return NETDEV_TX_BUSY;
}
vi->svq->vq_ops->kick(vi->svq);
{
struct virtnet_info *vi = netdev_priv(dev);
- virtnet_napi_enable(vi);
+ napi_enable(&vi->napi);
+
+ /* If all buffers were filled by other side before we napi_enabled, we
+ * won't get another interrupt, so process any outstanding packets
+ * now. virtnet_poll wants re-enable the queue, so we disable here.
+ * We synchronize against interrupts via NAPI_STATE_SCHED */
+ if (napi_schedule_prep(&vi->napi)) {
+ vi->rvq->vq_ops->disable_cb(vi->rvq);
+ __napi_schedule(&vi->napi);
+ }
return 0;
}
/* toggle the LRO feature*/
netdev->features ^= NETIF_F_LRO;
- /* Update private LRO flag */
- adapter->lro = lro_requested;
-
/* update harware LRO capability accordingly */
if (lro_requested)
adapter->shared->devRead.misc.uptFeatures &= UPT1_F_LRO;
board = z->resource.start;
ioaddr = board+cards[i].offset;
- dev = ____alloc_ei_netdev(0);
+ dev = alloc_ei_netdev();
if (!dev)
return -ENOMEM;
if (!request_mem_region(ioaddr, NE_IO_EXTENT*2, DRV_NAME)) {
static const struct net_device_ops zorro8390_netdev_ops = {
.ndo_open = zorro8390_open,
.ndo_stop = zorro8390_close,
- .ndo_start_xmit = __ei_start_xmit,
- .ndo_tx_timeout = __ei_tx_timeout,
- .ndo_get_stats = __ei_get_stats,
- .ndo_set_multicast_list = __ei_set_multicast_list,
+ .ndo_start_xmit = ei_start_xmit,
+ .ndo_tx_timeout = ei_tx_timeout,
+ .ndo_get_stats = ei_get_stats,
+ .ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = __ei_poll,
+ .ndo_poll_controller = ei_poll,
#endif
};
pdev = pci_get_slot(pbus, PCI_DEVFN(device, function));
if (pdev) {
- pdev->current_state = PCI_D0;
slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
pci_dev_put(pdev);
}
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/
-static int dmar_map_gfx = 1;
+static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
ret = iommu_attach_domain(domain, iommu);
if (ret) {
- free_domain_mem(domain);
+ domain_exit(domain);
goto error;
}
if (!domain)
return 0;
- if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
+ if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through)
domain_remove_one_dev_info(domain, pdev);
- if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
- !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
- list_empty(&domain->devices))
- domain_exit(domain);
- }
-
return 0;
}
domain->iommu_count--;
domain_update_iommu_cap(domain);
spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
-
- spin_lock_irqsave(&iommu->lock, tmp_flags);
- clear_bit(domain->id, iommu->domain_ids);
- iommu->domains[domain->id] = NULL;
- spin_unlock_irqrestore(&iommu->lock, tmp_flags);
}
spin_unlock_irqrestore(&device_domain_lock, flags);
*/
printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
rwbf_quirk = 1;
-
- /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
- if (dev->revision == 0x07) {
- printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
- dmar_map_gfx = 0;
- }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
subdevice = PCI_ANY_ID, class=0, class_mask=0;
int fields;
- if (!strlen(id))
- continue;
-
fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
&vendor, &device, &subvendor, &subdevice,
&class, &class_mask);
if (val != 1)
return -EINVAL;
-
- result = pci_reset_function(pdev);
- if (result < 0)
- return result;
-
- return count;
+ return pci_reset_function(pdev);
}
static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_store);
attr->write = write_vpd_attr;
retval = sysfs_create_bin_file(&dev->dev.kobj, attr);
if (retval) {
- kfree(attr);
+ kfree(dev->vpd->attr);
return retval;
}
dev->vpd->attr = attr;
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, quirk_piix4_acpi);
-#define ICH_PMBASE 0x40
-#define ICH_ACPI_CNTL 0x44
-#define ICH4_ACPI_EN 0x10
-#define ICH6_ACPI_EN 0x80
-#define ICH4_GPIOBASE 0x58
-#define ICH4_GPIO_CNTL 0x5c
-#define ICH4_GPIO_EN 0x10
-#define ICH6_GPIOBASE 0x48
-#define ICH6_GPIO_CNTL 0x4c
-#define ICH6_GPIO_EN 0x10
-
/*
* ICH4, ICH4-M, ICH5, ICH5-M ACPI: Three IO regions pointed to by longwords at
* 0x40 (128 bytes of ACPI, GPIO & TCO registers)
static void __devinit quirk_ich4_lpc_acpi(struct pci_dev *dev)
{
u32 region;
- u8 enable;
- /*
- * The check for PCIBIOS_MIN_IO is to ensure we won't create a conflict
- * with low legacy (and fixed) ports. We don't know the decoding
- * priority and can't tell whether the legacy device or the one created
- * here is really at that address. This happens on boards with broken
- * BIOSes.
- */
-
- pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
- if (enable & ICH4_ACPI_EN) {
- pci_read_config_dword(dev, ICH_PMBASE, &region);
- region &= PCI_BASE_ADDRESS_IO_MASK;
- if (region >= PCIBIOS_MIN_IO)
- quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES,
- "ICH4 ACPI/GPIO/TCO");
- }
+ pci_read_config_dword(dev, 0x40, &region);
+ quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH4 ACPI/GPIO/TCO");
- pci_read_config_byte(dev, ICH4_GPIO_CNTL, &enable);
- if (enable & ICH4_GPIO_EN) {
- pci_read_config_dword(dev, ICH4_GPIOBASE, &region);
- region &= PCI_BASE_ADDRESS_IO_MASK;
- if (region >= PCIBIOS_MIN_IO)
- quirk_io_region(dev, region, 64,
- PCI_BRIDGE_RESOURCES + 1, "ICH4 GPIO");
- }
+ pci_read_config_dword(dev, 0x58, &region);
+ quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH4 GPIO");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi);
static void __devinit ich6_lpc_acpi_gpio(struct pci_dev *dev)
{
u32 region;
- u8 enable;
- pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
- if (enable & ICH6_ACPI_EN) {
- pci_read_config_dword(dev, ICH_PMBASE, &region);
- region &= PCI_BASE_ADDRESS_IO_MASK;
- if (region >= PCIBIOS_MIN_IO)
- quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES,
- "ICH6 ACPI/GPIO/TCO");
- }
+ pci_read_config_dword(dev, 0x40, &region);
+ quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH6 ACPI/GPIO/TCO");
- pci_read_config_byte(dev, ICH6_GPIO_CNTL, &enable);
- if (enable & ICH4_GPIO_EN) {
- pci_read_config_dword(dev, ICH6_GPIOBASE, &region);
- region &= PCI_BASE_ADDRESS_IO_MASK;
- if (region >= PCIBIOS_MIN_IO)
- quirk_io_region(dev, region, 64,
- PCI_BRIDGE_RESOURCES + 1, "ICH6 GPIO");
- }
+ pci_read_config_dword(dev, 0x48, &region);
+ quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO");
}
static void __devinit ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize)
#endif /* CONFIG_PCI_MSI */
+#ifdef CONFIG_PCI_IOV
+
+/*
+ * For Intel 82576 SR-IOV NIC, if BIOS doesn't allocate resources for the
+ * SR-IOV BARs, zero the Flash BAR and program the SR-IOV BARs to use the
+ * old Flash Memory Space.
+ */
+static void __devinit quirk_i82576_sriov(struct pci_dev *dev)
+{
+ int pos, flags;
+ u32 bar, start, size;
+
+ if (PAGE_SIZE > 0x10000)
+ return;
+
+ flags = pci_resource_flags(dev, 0);
+ if ((flags & PCI_BASE_ADDRESS_SPACE) !=
+ PCI_BASE_ADDRESS_SPACE_MEMORY ||
+ (flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) !=
+ PCI_BASE_ADDRESS_MEM_TYPE_32)
+ return;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos)
+ return;
+
+ pci_read_config_dword(dev, pos + PCI_SRIOV_BAR, &bar);
+ if (bar & PCI_BASE_ADDRESS_MEM_MASK)
+ return;
+
+ start = pci_resource_start(dev, 1);
+ size = pci_resource_len(dev, 1);
+ if (!start || size != 0x400000 || start & (size - 1))
+ return;
+
+ pci_resource_flags(dev, 1) = 0;
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, 0);
+ pci_write_config_dword(dev, pos + PCI_SRIOV_BAR, start);
+ pci_write_config_dword(dev, pos + PCI_SRIOV_BAR + 12, start + size / 2);
+
+ dev_info(&dev->dev, "use Flash Memory Space for SR-IOV BARs\n");
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10c9, quirk_i82576_sriov);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e6, quirk_i82576_sriov);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1518, quirk_i82576_sriov);
+
+#endif /* CONFIG_PCI_IOV */
+
static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
struct pci_fixup *end)
{
*/
#define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB"
#define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C"
-#define WMID_GUID1 "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3"
+#define WMID_GUID1 "6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3"
#define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A"
MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB");
return -EINVAL;
return count;
}
-static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg,
+static DEVICE_ATTR(threeg, S_IWUGO | S_IRUGO | S_IWUSR, show_bool_threeg,
set_bool_threeg);
static ssize_t show_interface(struct device *dev, struct device_attribute *attr,
struct proc_dir_entry *proc;
mode_t mode;
+ /*
+ * If parameter uid or gid is not changed, keep the default setting for
+ * our proc entries (-rw-rw-rw-) else, it means we care about security,
+ * and then set to -rw-rw----
+ */
+
if ((asus_uid == 0) && (asus_gid == 0)) {
- mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP;
+ mode = S_IFREG | S_IRUGO | S_IWUGO;
} else {
mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP;
printk(KERN_WARNING " asus_uid and asus_gid parameters are "
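/*
 * Aside -- a small userspace sketch of what the mode constants in this
 * hunk work out to; the difference at stake is the world-writable bit.
 * S_IRUGO/S_IWUGO are defined here the same way the kernel defines them.
 */
#include <stdio.h>
#include <sys/stat.h>

#define S_IRUGO	(S_IRUSR | S_IRGRP | S_IROTH)
#define S_IWUGO	(S_IWUSR | S_IWGRP | S_IWOTH)

int main(void)
{
	printf("S_IRUGO | S_IWUSR | S_IWGRP     = %04o\n",
	       S_IRUGO | S_IWUSR | S_IWGRP);		/* 0664 */
	printf("S_IRUGO | S_IWUGO               = %04o\n",
	       S_IRUGO | S_IWUGO);			/* 0666 */
	printf("S_IRUSR|S_IRGRP|S_IWUSR|S_IWGRP = %04o\n",
	       S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP);	/* 0660 */
	return 0;
}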
DMI_MATCH(DMI_CHASSIS_TYPE, "8"),
},
},
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /*Laptop*/
- },
- },
- {
- .ident = "Dell Computer Corporation",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
- DMI_MATCH(DMI_CHASSIS_TYPE, "8"),
- },
- },
{ }
};
MODULE_DESCRIPTION("Dell laptop driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("dmi:*svnDellInc.:*:ct8:*");
-MODULE_ALIAS("dmi:*svnDellInc.:*:ct9:*");
-MODULE_ALIAS("dmi:*svnDellComputerCorporation.:*:ct8:*");
return -EINVAL; \
return count; \
} \
-static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, \
+static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \
show_bool_##value, set_bool_##value);
show_set_bool(wireless, TC1100_INSTANCE_WIRELESS);
if (di->rem_capacity > 100)
di->rem_capacity = 100;
- if (di->current_uA < -100L)
+ if (di->current_uA >= 100L)
di->life_sec = -((di->accum_current_uAh - di->empty_uAh) * 36L)
/ (di->current_uA / 100L);
else
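/*
 * Aside -- a worked example (made-up numbers) of the time-to-empty
 * formula above for the discharging case: remaining charge in uAh
 * divided by current in uA gives hours, and the 36/100 pair of factors
 * folds in the *3600 s/h conversion while keeping the divisor non-zero.
 */
#include <stdio.h>

int main(void)
{
	long accum_current_uAh = 600000;	/* charge stored now (assumed) */
	long empty_uAh = 100000;		/* charge at the empty mark (assumed) */
	long current_uA = -1000000;		/* 1 A discharge; negative = draining */
	long life_sec = 0;

	if (current_uA < -100L)
		life_sec = -((accum_current_uAh - empty_uAh) * 36L) /
			   (current_uA / 100L);

	/* (500000 uAh / 1000000 uA) * 3600 s/h = 1800 s */
	printf("estimated time to empty: %ld s\n", life_sec);
	return 0;
}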
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <linux/log2.h>
-#include <linux/pm.h>
/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
#include <asm-generic/rtc.h>
#ifdef CONFIG_PM
-static int cmos_suspend(struct device *dev)
+static int cmos_suspend(struct device *dev, pm_message_t mesg)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
unsigned char tmp;
*/
static inline int cmos_poweroff(struct device *dev)
{
- return cmos_suspend(dev);
+ return cmos_suspend(dev, PMSG_HIBERNATE);
}
static int cmos_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume);
-
#else
+#define cmos_suspend NULL
+#define cmos_resume NULL
static inline int cmos_poweroff(struct device *dev)
{
static int cmos_pnp_suspend(struct pnp_dev *pnp, pm_message_t mesg)
{
- return cmos_suspend(&pnp->dev);
+ return cmos_suspend(&pnp->dev, mesg);
}
static int cmos_pnp_resume(struct pnp_dev *pnp)
.shutdown = cmos_platform_shutdown,
.driver = {
.name = (char *) driver_name,
-#ifdef CONFIG_PM
- .pm = &cmos_pm_ops,
-#endif
+ .suspend = cmos_suspend,
+ .resume = cmos_resume,
}
};
static struct bin_attribute ds1511_nvram_attr = {
.attr = {
.name = "nvram",
- .mode = S_IRUGO | S_IWUSR,
+ .mode = S_IRUGO | S_IWUGO,
},
.size = DS1511_RAM_MAX,
.read = ds1511_nvram_read,
static struct ccw_device_id dasd_eckd_ids[] = {
{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
- { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
+ { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3},
{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
unsigned int cmd, unsigned long arg)
{
void __user *argp;
- unsigned int ct;
- int perm;
+ int ct, perm;
argp = (void __user *)arg;
{"IBM", "2145" },
{"Pillar", "Axiom" },
{"Intel", "Multi-Flex"},
- {"NETAPP", "LUN"},
- {"AIX", "NVDISK"},
{NULL, NULL}
};
int len = 0;
rq = blk_get_request(sdev->request_queue,
- (cmd != INQUIRY) ? WRITE : READ, GFP_NOIO);
+ (cmd == MODE_SELECT) ? WRITE : READ, GFP_NOIO);
if (!rq) {
sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed");
return NULL;
switch (cmd) {
case MODE_SELECT:
len = sizeof(short_trespass);
+ rq->cmd_flags |= REQ_RW;
rq->cmd[1] = 0x10;
- rq->cmd[4] = len;
break;
case MODE_SELECT_10:
len = sizeof(long_trespass);
+ rq->cmd_flags |= REQ_RW;
rq->cmd[1] = 0x10;
- rq->cmd[8] = len;
break;
case INQUIRY:
len = CLARIION_BUFFER_SIZE;
- rq->cmd[4] = len;
memset(buffer, 0, len);
break;
default:
break;
}
+ rq->cmd[4] = len;
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
spin_lock_irqsave(shost->host_lock, flags);
list_splice_init(&shost->eh_cmd_q, &eh_work_q);
- shost->host_eh_scheduled = 0;
spin_unlock_irqrestore(shost->host_lock, flags);
SAS_DPRINTK("Enter %s\n", __func__);
* For each user buffer, create a mirror buffer and copy in
*/
for (i = 0; i < ioc->sge_count; i++) {
- if (!ioc->sgl[i].iov_len)
- continue;
-
kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
ioc->sgl[i].iov_len,
&buf_handle, GFP_KERNEL);
/* adjust hba_queue_depth, reply_free_queue_depth,
* and queue_size
*/
- ioc->hba_queue_depth -= (queue_diff / 2);
- ioc->reply_free_queue_depth -= (queue_diff / 2);
- queue_size = facts->MaxReplyDescriptorPostQueueDepth;
+ ioc->hba_queue_depth -= queue_diff;
+ ioc->reply_free_queue_depth -= queue_diff;
+ queue_size -= queue_diff;
}
ioc->reply_post_queue_depth = queue_size;
static void
_base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
{
- mpt2sas_scsih_reset_handler(ioc, reset_phase);
- mpt2sas_ctl_reset_handler(ioc, reset_phase);
switch (reset_phase) {
case MPT2_IOC_PRE_RESET:
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
break;
}
+ mpt2sas_scsih_reset_handler(ioc, reset_phase);
+ mpt2sas_ctl_reset_handler(ioc, reset_phase);
}
/**
{
int r;
unsigned long flags;
- u8 pe_complete = ioc->wait_for_port_enable_to_complete;
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name,
__func__));
if (r)
goto out;
_base_reset_handler(ioc, MPT2_IOC_AFTER_RESET);
-
- /* If this hard reset is called while port enable is active, then
- * there is no reason to call make_ioc_operational
- */
- if (pe_complete) {
- r = -EFAULT;
- goto out;
- }
r = _base_make_ioc_operational(ioc, sleep_flag);
if (!r)
_base_reset_handler(ioc, MPT2_IOC_DONE_RESET);
data_out_sz = karg.data_out_size;
data_in_sz = karg.data_in_size;
- /* Check for overflow and wraparound */
- if (karg.data_sge_offset * 4 > ioc->request_sz ||
- karg.data_sge_offset > (UINT_MAX / 4)) {
- ret = -EINVAL;
- goto out;
- }
-
/* copy in request message frame from user */
if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) {
printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__,
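/*
 * Aside -- a minimal sketch of the overflow-safe bounds test that the
 * removed lines performed: before using "data_sge_offset * 4" as a copy
 * length, reject offsets whose multiplication by 4 would wrap around as
 * well as those larger than the request frame.  Values are illustrative.
 */
#include <limits.h>
#include <stdio.h>

static int sge_offset_ok(unsigned int offset, unsigned int request_sz)
{
	if (offset > UINT_MAX / 4)		/* offset * 4 would wrap */
		return 0;
	if (offset * 4 > request_sz)		/* exceeds the request frame */
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", sge_offset_ok(16, 128));			/* 1: 64 <= 128 */
	printf("%d\n", sge_offset_ok(64, 128));			/* 0: 256 > 128 */
	printf("%d\n", sge_offset_ok(UINT_MAX / 2, 128));	/* 0: would wrap */
	return 0;
}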
Mpi2DiagBufferPostReply_t *mpi_reply;
int rc, i;
u8 buffer_type;
- unsigned long timeleft, request_size, copy_size;
+ unsigned long timeleft;
u16 smid;
u16 ioc_status;
u8 issue_reset = 0;
return -ENOMEM;
}
- request_size = ioc->diag_buffer_sz[buffer_type];
-
if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) {
printk(MPT2SAS_ERR_FMT "%s: either the starting_offset "
"or bytes_to_read are not 4 byte aligned\n", ioc->name,
return -EINVAL;
}
- if (karg.starting_offset > request_size)
- return -EINVAL;
-
diag_data = (void *)(request_data + karg.starting_offset);
dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: diag_buffer(%p), "
"offset(%d), sz(%d)\n", ioc->name, __func__,
diag_data, karg.starting_offset, karg.bytes_to_read));
- /* Truncate data on requests that are too large */
- if ((diag_data + karg.bytes_to_read < diag_data) ||
- (diag_data + karg.bytes_to_read > request_data + request_size))
- copy_size = request_size - karg.starting_offset;
- else
- copy_size = karg.bytes_to_read;
-
if (copy_to_user((void __user *)uarg->diagnostic_data,
- diag_data, copy_size)) {
+ diag_data, karg.bytes_to_read)) {
printk(MPT2SAS_ERR_FMT "%s: Unable to write "
"mpt_diag_read_buffer_t data @ %p\n", ioc->name,
__func__, diag_data);
u32 chain_offset;
u32 chain_length;
u32 chain_flags;
- int sges_left;
+ u32 sges_left;
u32 sges_in_segment;
u32 sgl_flags;
u32 sgl_flags_last_element;
sg_scmd = scsi_sglist(scmd);
sges_left = scsi_dma_map(scmd);
- if (sges_left < 0) {
+ if (!sges_left) {
sdev_printk(KERN_ERR, scmd->device, "pci_map_sg"
" failed: request for %d bytes!\n", scsi_bufflen(scmd));
return -ENOMEM;
u16 handle;
for (i = 0 ; i < event_data->NumEntries; i++) {
+ if (event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT)
+ continue;
handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
if (!handle)
continue;
rc = -EFAULT;
goto out_free_buffer;
}
- } else if (request_size < 0) {
- rc = -EINVAL;
- goto out_free_buffer;
}
/* check if we have any additional command parameters */
static void scsi_run_queue(struct request_queue *q)
{
struct scsi_device *sdev = q->queuedata;
- struct Scsi_Host *shost;
+ struct Scsi_Host *shost = sdev->host;
LIST_HEAD(starved_list);
unsigned long flags;
- /* if the device is dead, sdev will be NULL, so no queue to run */
- if (!sdev)
- return;
-
- shost = sdev->host;
if (scsi_target(sdev)->single_lun)
scsi_single_lun_run(sdev);
kfree(evt);
}
- /* NULL queue means the device can't be used */
- sdev->request_queue = NULL;
+ if (sdev->request_queue) {
+ sdev->request_queue->queuedata = NULL;
+ /* user context needed to free queue */
+ scsi_free_queue(sdev->request_queue);
+ /* temporary expedient, try to catch use of queue lock
+ * after free of sdev */
+ sdev->request_queue = NULL;
+ }
scsi_target_reap(scsi_target(sdev));
if (sdev->host->hostt->slave_destroy)
sdev->host->hostt->slave_destroy(sdev);
transport_destroy_device(dev);
-
- /* cause the request function to reject all I/O requests */
- sdev->request_queue->queuedata = NULL;
-
- /* Freeing the queue signals to block that we're done */
- scsi_free_queue(sdev->request_queue);
put_device(dev);
}
u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
u64 bad_lba;
int info_valid;
- /*
- * resid is optional but mostly filled in. When it's unused,
- * its value is zero, so we assume the whole buffer transferred
- */
- unsigned int transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);
- unsigned int good_bytes;
if (!blk_fs_request(scmd->request))
return 0;
/* This computation should always be done in terms of
* the resolution of the device's medium.
*/
- good_bytes = (bad_lba - start_lba) * scmd->device->sector_size;
- return min(good_bytes, transferred);
+ return (bad_lba - start_lba) * scmd->device->sector_size;
}
/**
len = (desc_ptr[2] << 8) + desc_ptr[3];
/* skip past overall descriptor */
desc_ptr += len + 4;
+ if (ses_dev->page10)
+ addl_desc_ptr = ses_dev->page10 + 8;
}
- if (ses_dev->page10)
- addl_desc_ptr = ses_dev->page10 + 8;
type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
components = 0;
for (i = 0; i < types; i++, type_ptr += 4) {
.fifo_size = 128,
.tx_loadsz = 128,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
- /* UART_CAP_EFR breaks billionon CF bluetooth card. */
- .flags = UART_CAP_FIFO | UART_CAP_SLEEP,
+ .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
},
[PORT_RSA] = {
.name = "RSA",
static irqreturn_t imx_rtsint(int irq, void *dev_id)
{
struct imx_port *sport = dev_id;
- unsigned int val;
+ unsigned int val = readl(sport->port.membase + USR1) & USR1_RTSS;
unsigned long flags;
spin_lock_irqsave(&sport->port.lock, flags);
writel(USR1_RTSD, sport->port.membase + USR1);
- val = readl(sport->port.membase + USR1) & USR1_RTSS;
uart_handle_cts_change(&sport->port, !!val);
wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
#define PCI_VENDOR_ID_JR3 0x1762
#define PCI_DEVICE_ID_JR3_1_CHANNEL 0x3111
-#define PCI_DEVICE_ID_JR3_1_CHANNEL_NEW 0x1111
#define PCI_DEVICE_ID_JR3_2_CHANNEL 0x3112
#define PCI_DEVICE_ID_JR3_3_CHANNEL 0x3113
#define PCI_DEVICE_ID_JR3_4_CHANNEL 0x3114
{
PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL_NEW,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_2_CHANNEL,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_3_CHANNEL,
devpriv->n_channels = 1;
}
break;
- case PCI_DEVICE_ID_JR3_1_CHANNEL_NEW:{
- devpriv->n_channels = 1;
- }
- break;
case PCI_DEVICE_ID_JR3_2_CHANNEL:{
devpriv->n_channels = 2;
}
}
devpriv->pci_enabled = 1;
- devpriv->iobase = ioremap(pci_resource_start(card, 0),
- offsetof(struct jr3_t, channel[devpriv->n_channels]));
- if (!devpriv->iobase)
- return -ENOMEM;
-
+ devpriv->iobase =
+ ioremap(pci_resource_start(card, 0), sizeof(struct jr3_t));
result = alloc_subdevices(dev, devpriv->n_channels);
if (result < 0)
goto out;
/* grab our IRQ */
if (irq) {
isr_flags = 0;
- if (thisboard->bustype == pci_bustype
- || thisboard->bustype == pcmcia_bustype)
+ if (thisboard->bustype == pci_bustype)
isr_flags |= IRQF_SHARED;
if (request_irq(irq, labpc_interrupt, isr_flags,
driver_labpc.driver_name, dev)) {
if (Channel->OfferMsg.MonitorAllocated) {
/* Each u32 represents 32 channels */
- sync_set_bit(Channel->OfferMsg.ChildRelId & 31,
+ set_bit(Channel->OfferMsg.ChildRelId & 31,
(unsigned long *) gVmbusConnection.SendInterruptPage +
(Channel->OfferMsg.ChildRelId >> 5));
monitorPage = gVmbusConnection.MonitorPages;
monitorPage++; /* Get the child to parent monitor page */
- sync_set_bit(Channel->MonitorBit,
+ set_bit(Channel->MonitorBit,
(unsigned long *)&monitorPage->TriggerGroup
[Channel->MonitorGroup].Pending);
if (Channel->OfferMsg.MonitorAllocated) {
/* Each u32 represents 32 channels */
- sync_clear_bit(Channel->OfferMsg.ChildRelId & 31,
+ clear_bit(Channel->OfferMsg.ChildRelId & 31,
(unsigned long *)gVmbusConnection.SendInterruptPage +
(Channel->OfferMsg.ChildRelId >> 5));
(struct hv_monitor_page *)gVmbusConnection.MonitorPages;
monitorPage++; /* Get the child to parent monitor page */
- sync_clear_bit(Channel->MonitorBit,
+ clear_bit(Channel->MonitorBit,
(unsigned long *)&monitorPage->TriggerGroup
[Channel->MonitorGroup].Pending);
}
for (dword = 0; dword < maxdword; dword++) {
if (recvInterruptPage[dword]) {
for (bit = 0; bit < 32; bit++) {
- if (sync_test_and_clear_bit(bit,
- (unsigned long *)
- &recvInterruptPage[dword])) {
+ if (test_and_clear_bit(bit, (unsigned long *)&recvInterruptPage[dword])) {
relid = (dword << 5) + bit;
DPRINT_DBG(VMBUS, "event detected for relid - %d", relid);
DPRINT_ENTER(VMBUS);
/* Each u32 represents 32 channels */
- sync_set_bit(childRelId & 31,
+ set_bit(childRelId & 31,
(unsigned long *)gVmbusConnection.SendInterruptPage +
(childRelId >> 5));
event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT;
/* Since we are a child, we only need to check bit 0 */
- if (sync_test_and_clear_bit(0, (unsigned long *) &event->Flags32[0])) {
+ if (test_and_clear_bit(0, (unsigned long *) &event->Flags32[0])) {
DPRINT_DBG(VMBUS, "received event %d", event->Flags32[0]);
ret |= 0x2;
}
#include "ChannelInterface.h"
#include "RingBuffer.h"
#include <linux/list.h>
-#include <asm/sync_bitops.h>
/*
blkdev->gd->first_minor = 0;
blkdev->gd->fops = &block_ops;
blkdev->gd->private_data = blkdev;
- blkdev->gd->driverfs_dev = &(blkdev->device_ctx->device);
sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum);
blkvsc_do_inquiry(blkdev);
/* point back to our device context */
struct device_context *device_ctx;
struct net_device_stats stats;
- struct work_struct work;
};
struct netvsc_driver_context {
{
struct device_context *device_ctx = to_device_context(device_obj);
struct net_device *net = dev_get_drvdata(&device_ctx->device);
- struct net_device_context *ndev_ctx;
DPRINT_ENTER(NETVSC_DRV);
if (status == 1) {
netif_carrier_on(net);
netif_wake_queue(net);
- netif_notify_peers(net);
- ndev_ctx = netdev_priv(net);
- schedule_work(&ndev_ctx->work);
} else {
netif_carrier_off(net);
netif_stop_queue(net);
.ndo_set_mac_address = eth_mac_addr,
};
-/*
- * Send GARP packet to network peers after migrations.
- * After Quick Migration, the network is not immediately operational in the
- * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add
- * another netif_notify_peers() into a scheduled work, otherwise GARP packet
- * will not be sent after quick migration, and cause network disconnection.
- */
-static void netvsc_send_garp(struct work_struct *w)
-{
- struct net_device_context *ndev_ctx;
- struct net_device *net;
-
- msleep(20);
- ndev_ctx = container_of(w, struct net_device_context, work);
- net = dev_get_drvdata(&ndev_ctx->device_ctx->device);
- netif_notify_peers(net);
-}
-
-
static int netvsc_probe(struct device *device)
{
struct driver_context *driver_ctx =
net_device_ctx = netdev_priv(net);
net_device_ctx->device_ctx = device_ctx;
dev_set_drvdata(device, net);
- INIT_WORK(&net_device_ctx->work, netvsc_send_garp);
/* Notify the netvsc driver of the new device */
ret = net_drv_obj->Base.OnDeviceAdd(device_obj, &device_info);
/* Allocate skb buffer to contain firmware info and tx descriptor info. */
skb = dev_alloc_skb(frag_length);
- if (skb == NULL) {
- RT_TRACE(COMP_ERR, "(%s): unable to alloc skb buffer\n",
- __func__);
- goto cmdsend_downloadcode_fail;
- }
memcpy((unsigned char *)(skb->cb),&dev,sizeof(dev));
tcb_desc = (cb_desc*)(skb->cb + MAX_DEV_ADDR_SIZE);
*/
#include <linux/vmalloc.h>
-#include <linux/notifier.h>
#undef LOOP_TEST
#undef DUMP_RX
#define CAM_CONTENT_COUNT 8
static struct usb_device_id rtl8192_usb_id_tbl[] = {
- {USB_DEVICE(0x0bda, 0x8171)}, /* Realtek */
- {USB_DEVICE(0x0bda, 0x8172)},
- {USB_DEVICE(0x0bda, 0x8173)},
- {USB_DEVICE(0x0bda, 0x8174)},
- {USB_DEVICE(0x0bda, 0x8712)},
- {USB_DEVICE(0x0bda, 0x8713)},
- {USB_DEVICE(0x07aa, 0x0047)},
- {USB_DEVICE(0x07d1, 0x3303)},
- {USB_DEVICE(0x07d1, 0x3302)},
- {USB_DEVICE(0x07d1, 0x3300)},
- {USB_DEVICE(0x1740, 0x9603)},
- {USB_DEVICE(0x1740, 0x9605)},
- {USB_DEVICE(0x050d, 0x815F)},
- {USB_DEVICE(0x06f8, 0xe031)},
- {USB_DEVICE(0x7392, 0x7611)},
- {USB_DEVICE(0x7392, 0x7612)},
- {USB_DEVICE(0x7392, 0x7622)},
- {USB_DEVICE(0x0DF6, 0x0045)},
- {USB_DEVICE(0x0E66, 0x0015)},
- {USB_DEVICE(0x0E66, 0x0016)},
- {USB_DEVICE(0x0b05, 0x1786)},
- /* these are not in the official list */
+ /* Realtek */
+ {USB_DEVICE(0x0bda, 0x8171)},
+ {USB_DEVICE(0x0bda, 0x8192)},
+ {USB_DEVICE(0x0bda, 0x8709)},
+ /* Corega */
+ {USB_DEVICE(0x07aa, 0x0043)},
+ /* Belkin */
+ {USB_DEVICE(0x050d, 0x805E)},
{USB_DEVICE(0x050d, 0x815F)}, /* Belkin F5D8053 v6 */
- {USB_DEVICE(0x0df6, 0x004b)}, /* WL-349 */
+ /* Sitecom */
+ {USB_DEVICE(0x0df6, 0x0031)},
+ {USB_DEVICE(0x0df6, 0x004b)}, /* WL-349 */
+ /* EnGenius */
+ {USB_DEVICE(0x1740, 0x9201)},
+ /* Dlink */
+ {USB_DEVICE(0x2001, 0x3301)},
+ /* Zinwell */
+ {USB_DEVICE(0x5a57, 0x0290)},
+ /* Guillemot */
+ {USB_DEVICE(0x06f8, 0xe031)},
+ //92SU
+ {USB_DEVICE(0x0bda, 0x8172)},
{}
};
static int __devinit rtl8192_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id);
static void __devexit rtl8192_usb_disconnect(struct usb_interface *intf);
-static const struct net_device_ops rtl8192_netdev_ops;
-static struct notifier_block proc_netdev_notifier;
static struct usb_driver rtl8192_usb_driver = {
.name = RTL819xU_MODULE_NAME, /* Driver name */
return len;
}
-int rtl8192_proc_module_init(void)
+void rtl8192_proc_module_init(void)
{
- int ret;
-
RT_TRACE(COMP_INIT, "Initializing proc filesystem");
rtl8192_proc=create_proc_entry(RTL819xU_MODULE_NAME, S_IFDIR, init_net.proc_net);
- if (!rtl8192_proc)
- return -ENOMEM;
- ret = register_netdevice_notifier(&proc_netdev_notifier);
- if (ret)
- remove_proc_entry(RTL819xU_MODULE_NAME, init_net.proc_net);
- return ret;
}
void rtl8192_proc_module_remove(void)
{
- unregister_netdevice_notifier(&proc_netdev_notifier);
remove_proc_entry(RTL819xU_MODULE_NAME, init_net.proc_net);
}
remove_proc_entry("registers-e", priv->dir_dev);
// remove_proc_entry("cck-registers",priv->dir_dev);
// remove_proc_entry("ofdm-registers",priv->dir_dev);
- remove_proc_entry(priv->dir_dev->name, rtl8192_proc);
+ //remove_proc_entry(dev->name, rtl8192_proc);
+ remove_proc_entry("wlan0", rtl8192_proc);
priv->dir_dev = NULL;
}
}
dev->name);
}
}
-
-static int proc_netdev_event(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- struct net_device *net_dev = ptr;
-
- if (net_dev->netdev_ops == &rtl8192_netdev_ops &&
- event == NETDEV_CHANGENAME) {
- rtl8192_proc_remove_one(net_dev);
- rtl8192_proc_init_one(net_dev);
- }
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block proc_netdev_notifier = {
- .notifier_call = proc_netdev_event,
-};
-
/****************************************************************************
-----------------------------MISC STUFF-------------------------
*****************************************************************************/
ret = ieee80211_crypto_init();
if (ret) {
printk(KERN_ERR "ieee80211_crypto_init() failed %d\n", ret);
- goto fail_crypto;
+ return ret;
}
ret = ieee80211_crypto_tkip_init();
if (ret) {
printk(KERN_ERR "ieee80211_crypto_tkip_init() failed %d\n",
ret);
- goto fail_crypto_tkip;
+ return ret;
}
ret = ieee80211_crypto_ccmp_init();
if (ret) {
printk(KERN_ERR "ieee80211_crypto_ccmp_init() failed %d\n",
ret);
- goto fail_crypto_ccmp;
+ return ret;
}
ret = ieee80211_crypto_wep_init();
if (ret) {
printk(KERN_ERR "ieee80211_crypto_wep_init() failed %d\n", ret);
- goto fail_crypto_wep;
+ return ret;
}
printk(KERN_INFO "\nLinux kernel driver for RTL8192 based WLAN cards\n");
printk(KERN_INFO "Copyright (c) 2007-2008, Realsil Wlan\n");
RT_TRACE(COMP_INIT, "Initializing module");
RT_TRACE(COMP_INIT, "Wireless extensions version %d", WIRELESS_EXT);
-
- ret = rtl8192_proc_module_init();
- if (ret) {
- pr_err("rtl8192_proc_module_init() failed %d\n", ret);
- goto fail_proc;
- }
-
- ret = usb_register(&rtl8192_usb_driver);
- if (ret) {
- pr_err("usb_register() failed %d\n", ret);
- goto fail_usb;
- }
-
- return 0;
-
-fail_usb:
- rtl8192_proc_module_remove();
-fail_proc:
- ieee80211_crypto_wep_exit();
-fail_crypto_wep:
- ieee80211_crypto_ccmp_exit();
-fail_crypto_ccmp:
- ieee80211_crypto_tkip_exit();
-fail_crypto_tkip:
- ieee80211_crypto_deinit();
-fail_crypto:
-#ifdef CONFIG_IEEE80211_DEBUG
- ieee80211_debug_exit();
-#endif
- return ret;
+ rtl8192_proc_module_init();
+ return usb_register(&rtl8192_usb_driver);
}
//Get TCB and local buffer from common pool. (It is shared by CmdQ, MgntQ, and USB coalesce DataQ)
skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4);
- if (skb == NULL) {
- RT_TRACE(COMP_ERR, "(%s): unable to alloc skb buffer\n",
- __func__);
- rtStatus = false;
- return rtStatus;
- }
memcpy((unsigned char *)(skb->cb),&dev,sizeof(dev));
tcb_desc = (cb_desc*)(skb->cb + MAX_DEV_ADDR_SIZE);
tcb_desc->queue_index = TXCMD_QUEUE;
static int tweak_reset_device_cmd(struct urb *urb)
{
- struct stub_priv *priv = (struct stub_priv *) urb->context;
- struct stub_device *sdev = priv->sdev;
+ struct usb_ctrlrequest *req;
+ __u16 value;
+ __u16 index;
+ int ret;
- usbip_uinfo("reset_device %s\n", dev_name(&urb->dev->dev));
+ req = (struct usb_ctrlrequest *) urb->setup_packet;
+ value = le16_to_cpu(req->wValue);
+ index = le16_to_cpu(req->wIndex);
- /*
- * usb_lock_device_for_reset caused a deadlock: it causes the driver
- * to unbind. In the shutdown the rx thread is signalled to shut down
- * but this thread is pending in the usb_lock_device_for_reset.
- *
- * Instead queue the reset.
- *
- * Unfortunatly an existing usbip connection will be dropped due to
- * driver unbinding.
- */
- usb_queue_reset_device(sdev->interface);
- return 0;
+ usbip_uinfo("reset_device (port %d) to %s\n", index,
+ dev_name(&urb->dev->dev));
+
+ /* all interfaces should be owned by usbip driver, so just reset it. */
+ ret = usb_lock_device_for_reset(urb->dev, NULL);
+ if (ret < 0) {
+ dev_err(&urb->dev->dev, "lock for reset\n");
+ return ret;
+ }
+
+ /* try to reset the device */
+ ret = usb_reset_device(urb->dev);
+ if (ret < 0)
+ dev_err(&urb->dev->dev, "device reset\n");
+
+ usb_unlock_device(urb->dev);
+
+ return ret;
}
/*
struct stub_priv *priv, *tmp;
struct msghdr msg;
+ struct kvec iov[3];
size_t txsize;
size_t total_size = 0;
struct urb *urb = priv->urb;
struct usbip_header pdu_header;
void *iso_buffer = NULL;
- struct kvec *iov = NULL;
- int iovnum = 0;
txsize = 0;
memset(&pdu_header, 0, sizeof(pdu_header));
memset(&msg, 0, sizeof(msg));
+ memset(&iov, 0, sizeof(iov));
- if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
- iovnum = 2 + urb->number_of_packets;
- else
- iovnum = 2;
-
- iov = kzalloc(iovnum * sizeof(struct kvec), GFP_KERNEL);
+ usbip_dbg_stub_tx("setup txdata urb %p\n", urb);
- if (!iov) {
- usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_MALLOC);
- return -1;
- }
-
- iovnum = 0;
/* 1. setup usbip_header */
setup_ret_submit_pdu(&pdu_header, urb);
- usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
- pdu_header.base.seqnum, urb);
- /*usbip_dump_header(pdu_header);*/
usbip_header_correct_endian(&pdu_header, 1);
- iov[iovnum].iov_base = &pdu_header;
- iov[iovnum].iov_len = sizeof(pdu_header);
- iovnum++;
+ iov[0].iov_base = &pdu_header;
+ iov[0].iov_len = sizeof(pdu_header);
txsize += sizeof(pdu_header);
/* 2. setup transfer buffer */
- if (usb_pipein(urb->pipe) &&
- usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS &&
- urb->actual_length > 0) {
- iov[iovnum].iov_base = urb->transfer_buffer;
- iov[iovnum].iov_len = urb->actual_length;
- iovnum++;
+ if (usb_pipein(urb->pipe) && urb->actual_length > 0) {
+ iov[1].iov_base = urb->transfer_buffer;
+ iov[1].iov_len = urb->actual_length;
txsize += urb->actual_length;
- } else if (usb_pipein(urb->pipe) &&
- usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
- /*
- * For isochronous packets: actual length is the sum of
- * the actual length of the individual, packets, but as
- * the packet offsets are not changed there will be
- * padding between the packets. To optimally use the
- * bandwidth the padding is not transmitted.
- */
-
- int i;
- for (i = 0; i < urb->number_of_packets; i++) {
- iov[iovnum].iov_base = urb->transfer_buffer + urb->iso_frame_desc[i].offset;
- iov[iovnum].iov_len = urb->iso_frame_desc[i].actual_length;
- iovnum++;
- txsize += urb->iso_frame_desc[i].actual_length;
- }
-
- if (txsize != sizeof(pdu_header) + urb->actual_length) {
- dev_err(&sdev->interface->dev,
- "actual length of urb (%d) does not match iso packet sizes (%d)\n",
- urb->actual_length, txsize-sizeof(pdu_header));
- kfree(iov);
- usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
- return -1;
- }
}
/* 3. setup iso_packet_descriptor */
if (!iso_buffer) {
usbip_event_add(&sdev->ud,
SDEV_EVENT_ERROR_MALLOC);
- kfree(iov);
return -1;
}
- iov[iovnum].iov_base = iso_buffer;
- iov[iovnum].iov_len = len;
+ iov[2].iov_base = iso_buffer;
+ iov[2].iov_len = len;
txsize += len;
- iovnum++;
}
- ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg,
- iov, iovnum, txsize);
+ ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, iov,
+ 3, txsize);
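+ /* Note: iov[] always has three slots -- [0] the usbip header, [1] any
+ * IN transfer data, [2] the iso descriptor block -- and slots left
+ * zero-length by the memset above contribute nothing to the send, so
+ * txsize only counts what was actually filled in. */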
if (ret != txsize) {
dev_err(&sdev->interface->dev,
"sendmsg failed!, retval %d for %zd\n",
ret, txsize);
- kfree(iov);
kfree(iso_buffer);
usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
return -1;
}
- kfree(iov);
kfree(iso_buffer);
+ usbip_dbg_stub_tx("send txdata\n");
total_size += txsize;
}
+
spin_lock_irqsave(&sdev->priv_lock, flags);
list_for_each_entry_safe(priv, tmp, &sdev->priv_free, list) {
usbip_udbg("CMD_UNLINK: seq %u\n", pdu->u.cmd_unlink.seqnum);
break;
case USBIP_RET_SUBMIT:
- usbip_udbg("RET_SUBMIT: st %d al %u sf %d #p %d ec %d\n",
+ usbip_udbg("RET_SUBMIT: st %d al %u sf %d ec %d\n",
pdu->u.ret_submit.status,
pdu->u.ret_submit.actual_length,
pdu->u.ret_submit.start_frame,
- pdu->u.ret_submit.number_of_packets,
pdu->u.ret_submit.error_count);
case USBIP_RET_UNLINK:
usbip_udbg("RET_UNLINK: status %d\n", pdu->u.ret_unlink.status);
rpdu->status = urb->status;
rpdu->actual_length = urb->actual_length;
rpdu->start_frame = urb->start_frame;
- rpdu->number_of_packets = urb->number_of_packets;
rpdu->error_count = urb->error_count;
} else {
/* vhci_rx.c */
urb->status = rpdu->status;
urb->actual_length = rpdu->actual_length;
urb->start_frame = rpdu->start_frame;
- urb->number_of_packets = rpdu->number_of_packets;
urb->error_count = rpdu->error_count;
}
}
cpu_to_be32s(&pdu->status);
cpu_to_be32s(&pdu->actual_length);
cpu_to_be32s(&pdu->start_frame);
- cpu_to_be32s(&pdu->number_of_packets);
cpu_to_be32s(&pdu->error_count);
} else {
be32_to_cpus(&pdu->status);
be32_to_cpus(&pdu->actual_length);
be32_to_cpus(&pdu->start_frame);
- cpu_to_be32s(&pdu->number_of_packets);
be32_to_cpus(&pdu->error_count);
}
}
int size = np * sizeof(*iso);
int i;
int ret;
- int total_length = 0;
if (!usb_pipeisoc(urb->pipe))
return 0;
return -EPIPE;
}
-
for (i = 0; i < np; i++) {
iso = buff + (i * sizeof(*iso));
usbip_iso_pakcet_correct_endian(iso, 0);
usbip_pack_iso(iso, &urb->iso_frame_desc[i], 0);
- total_length += urb->iso_frame_desc[i].actual_length;
}
kfree(buff);
- if (total_length != urb->actual_length) {
- dev_err(&urb->dev->dev,
- "total length of iso packets (%d) not equal to actual length of buffer (%d)\n",
- total_length, urb->actual_length);
-
- if (ud->side == USBIP_STUB)
- usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
- else
- usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
-
- return -EPIPE;
- }
-
return ret;
}
EXPORT_SYMBOL_GPL(usbip_recv_iso);
-/*
- * This functions restores the padding which was removed for optimizing
- * the bandwidth during transfer over tcp/ip
- *
- * buffer and iso packets need to be stored and be in propeper endian in urb
- * before calling this function
- */
-int usbip_pad_iso(struct usbip_device *ud, struct urb *urb)
-{
- int np = urb->number_of_packets;
- int i;
- int ret;
- int actualoffset = urb->actual_length;
-
- if (!usb_pipeisoc(urb->pipe))
- return 0;
-
- /* if no packets or length of data is 0, then nothing to unpack */
- if (np == 0 || urb->actual_length == 0)
- return 0;
-
- /*
- * if actual_length is transfer_buffer_length then no padding is
- * present.
- */
- if (urb->actual_length == urb->transfer_buffer_length)
- return 0;
-
- /*
- * loop over all packets from last to first (to prevent overwritting
- * memory when padding) and move them into the proper place
- */
- for (i = np-1; i > 0; i--) {
- actualoffset -= urb->iso_frame_desc[i].actual_length;
- memmove(urb->transfer_buffer + urb->iso_frame_desc[i].offset,
- urb->transfer_buffer + actualoffset,
- urb->iso_frame_desc[i].actual_length);
- }
- return ret;
-}
-EXPORT_SYMBOL_GPL(usbip_pad_iso);
/* some members of urb must be substituted before. */
int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb);
/* some members of urb must be substituted before. */
int usbip_recv_iso(struct usbip_device *ud, struct urb *urb);
-/* some members of urb must be substituted before. */
-int usbip_pad_iso(struct usbip_device *ud, struct urb *urb);
void *usbip_alloc_iso_desc_pdu(struct urb *urb, ssize_t *bufflen);
* But, the index of this array begins from 0.
*/
struct vhci_device vdev[VHCI_NPORTS];
+
+ /* vhci_device which has not been assigned its address yet */
+ int pending_port;
};
void vhci_rx_loop(struct usbip_task *ut);
void vhci_tx_loop(struct usbip_task *ut);
-struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
- __u32 seqnum);
-
#define hardware (&the_controller->pdev.dev)
static inline struct vhci_device *port_to_vdev(__u32 port)
* the_controller->vdev[rhport].ud.status = VDEV_CONNECT;
* spin_unlock(&the_controller->vdev[rhport].ud.lock); */
+ the_controller->pending_port = rhport;
+
spin_unlock_irqrestore(&the_controller->lock, flags);
usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
struct device *dev = &urb->dev->dev;
int ret = 0;
unsigned long flags;
- struct vhci_device *vdev;
usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n",
hcd, urb, mem_flags);
return urb->status;
}
- vdev = port_to_vdev(urb->dev->portnum-1);
-
- /* refuse enqueue for dead connection */
- spin_lock(&vdev->ud.lock);
- if (vdev->ud.status == VDEV_ST_NULL || vdev->ud.status == VDEV_ST_ERROR) {
- usbip_uerr("enqueue for inactive port %d\n", vdev->rhport);
- spin_unlock(&vdev->ud.lock);
- spin_unlock_irqrestore(&the_controller->lock, flags);
- return -ENODEV;
- }
- spin_unlock(&vdev->ud.lock);
-
ret = usb_hcd_link_urb_to_ep(hcd, urb);
if (ret)
goto no_need_unlink;
__u8 type = usb_pipetype(urb->pipe);
struct usb_ctrlrequest *ctrlreq =
(struct usb_ctrlrequest *) urb->setup_packet;
+ struct vhci_device *vdev =
+ port_to_vdev(the_controller->pending_port);
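+ /* Requests to devnum 0 come from the device currently being enumerated,
+ * which cannot yet be looked up by its own address; pending_port was
+ * recorded in the attach path, so it points at the root-hub port (and
+ * hence the vhci_device) this control request belongs to. */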
if (type != PIPE_CONTROL || !ctrlreq) {
dev_err(dev, "invalid request to devnum 0\n");
dev_info(dev, "SetAddress Request (%d) to port %d\n",
ctrlreq->wValue, vdev->rhport);
- if (vdev->udev)
- usb_put_dev(vdev->udev);
- vdev->udev = usb_get_dev(urb->dev);
+ vdev->udev = urb->dev;
spin_lock(&vdev->ud.lock);
vdev->ud.status = VDEV_ST_USED;
"Get_Descriptor to device 0 "
"(get max pipe size)\n");
- if (vdev->udev)
- usb_put_dev(vdev->udev);
- vdev->udev = usb_get_dev(urb->dev);
+ /* FIXME: reference count? (usb_get_dev()) */
+ vdev->udev = urb->dev;
goto out;
default:
spin_unlock_irqrestore(&vdev->priv_lock, flags2);
}
+
+ if (!vdev->ud.tcp_socket) {
+ /* tcp connection is closed */
+ usbip_uinfo("vhci_hcd: vhci_urb_dequeue() gives back urb %p\n",
+ urb);
+
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+
+ spin_unlock_irqrestore(&the_controller->lock, flags);
+ usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
+ urb->status);
+ spin_lock_irqsave(&the_controller->lock, flags);
+ }
+
spin_unlock_irqrestore(&the_controller->lock, flags);
usbip_dbg_vhci_hc("leave\n");
return 0;
}
+
static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
{
struct vhci_unlink *unlink, *tmp;
spin_lock(&vdev->priv_lock);
list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
- usbip_uinfo("unlink cleanup tx %lu\n", unlink->unlink_seqnum);
list_del(&unlink->list);
kfree(unlink);
}
list_for_each_entry_safe(unlink, tmp, &vdev->unlink_rx, list) {
- struct urb *urb;
-
- /* give back URB of unanswered unlink request */
- usbip_uinfo("unlink cleanup rx %lu\n", unlink->unlink_seqnum);
-
- urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
- if (!urb) {
- usbip_uinfo("the urb (seqnum %lu) was already given back\n",
- unlink->unlink_seqnum);
- list_del(&unlink->list);
- kfree(unlink);
- continue;
- }
-
- urb->status = -ENODEV;
-
- spin_lock(&the_controller->lock);
- usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
- spin_unlock(&the_controller->lock);
-
- usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
-
list_del(&unlink->list);
kfree(unlink);
}
vdev->speed = 0;
vdev->devid = 0;
- if (vdev->udev)
- usb_put_dev(vdev->udev);
- vdev->udev = NULL;
-
ud->tcp_socket = NULL;
ud->status = VDEV_ST_NULL;
usbip_uerr("create hcd failed\n");
return -ENOMEM;
}
- hcd->has_tt = 1;
+
/* this is private data for vhci_hcd */
the_controller = hcd_to_vhci(hcd);
#include "vhci.h"
-/* get URB from transmitted urb queue. caller must hold vdev->priv_lock */
-struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
+/* get URB from transmitted urb queue */
+static struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
__u32 seqnum)
{
struct vhci_priv *priv, *tmp;
struct urb *urb = NULL;
int status;
+ spin_lock(&vdev->priv_lock);
+
list_for_each_entry_safe(priv, tmp, &vdev->priv_rx, list) {
if (priv->seqnum == seqnum) {
urb = priv->urb;
}
}
+ spin_unlock(&vdev->priv_lock);
+
return urb;
}
struct usbip_device *ud = &vdev->ud;
struct urb *urb;
- spin_lock(&vdev->priv_lock);
urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
- spin_unlock(&vdev->priv_lock);
if (!urb) {
usbip_uerr("cannot find a urb of seqnum %u\n",
if (usbip_recv_iso(ud, urb) < 0)
return;
- /* restore the padding in iso packets */
- if (usbip_pad_iso(ud, urb) < 0)
- return;
if (usbip_dbg_flag_vhci_rx)
usbip_dump_urb(urb);
return;
}
- spin_lock(&vdev->priv_lock);
-
urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
-
- spin_unlock(&vdev->priv_lock);
-
if (!urb) {
/*
* I get the result of a unlink request. But, it seems that I
if (!ACM_READY(acm))
goto exit;
- usb_mark_last_busy(acm->dev);
-
data = (unsigned char *)(dr + 1);
switch (dr->bNotificationType) {
case USB_CDC_NOTIFY_NETWORK_CONNECTION:
break;
}
exit:
+ usb_mark_last_busy(acm->dev);
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
dev_err(&urb->dev->dev, "%s - usb_submit_urb failed with "
if (!ACM_READY(acm))
return;
tty = tty_port_tty_get(&acm->port);
- if (!tty)
- return;
tty_wakeup(tty);
tty_kref_put(tty);
}
usb_kill_urb(acm->ctrlurb);
for (i = 0; i < ACM_NW; i++)
usb_kill_urb(acm->wb[i].urb);
- tasklet_disable(&acm->urb_task);
for (i = 0; i < nr; i++)
usb_kill_urb(acm->ru[i].urb);
- tasklet_enable(&acm->urb_task);
acm->control->needs_remote_wakeup = 0;
usb_autopm_put_interface(acm->control);
}
{ NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */
{ NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
{ NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
- { NOKIA_PCSUITE_ACM_INFO(0x0302), }, /* Nokia N8 */
{ SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
/* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
#define ALLOW_SERIAL_NUMBER
static const char *format_topo =
-/* T: Bus=dd Lev=dd Prnt=dd Port=dd Cnt=dd Dev#=ddd Spd=dddd MxCh=dd */
-"\nT: Bus=%2.2d Lev=%2.2d Prnt=%2.2d Port=%2.2d Cnt=%2.2d Dev#=%3d Spd=%-4s MxCh=%2d\n";
+/* T: Bus=dd Lev=dd Prnt=dd Port=dd Cnt=dd Dev#=ddd Spd=ddd MxCh=dd */
+"\nT: Bus=%2.2d Lev=%2.2d Prnt=%2.2d Port=%2.2d Cnt=%2.2d Dev#=%3d Spd=%3s MxCh=%2d\n";
static const char *format_string_manufacturer =
/* S: Manufacturer=xxxx */
break;
case USB_ENDPOINT_XFER_INT:
type = "Int.";
- if (speed == USB_SPEED_HIGH || speed == USB_SPEED_SUPER)
+ if (speed == USB_SPEED_HIGH)
interval = 1 << (desc->bInterval - 1);
else
interval = desc->bInterval;
default: /* "can't happen" */
return start;
}
- interval *= (speed == USB_SPEED_HIGH ||
- speed == USB_SPEED_SUPER) ? 125 : 1000;
+ interval *= (speed == USB_SPEED_HIGH) ? 125 : 1000;
if (interval % 1000)
unit = 'u';
else {
speed = "1.5"; break;
case USB_SPEED_UNKNOWN: /* usb 1.1 root hub code */
case USB_SPEED_FULL:
- speed = "12"; break;
+ speed = "12 "; break;
case USB_SPEED_HIGH:
speed = "480"; break;
- case USB_SPEED_SUPER:
- speed = "5000"; break;
default:
- speed = "??";
+ speed = "?? ";
}
data_end = pages_start + sprintf(pages_start, format_topo,
bus->busnum, level, parent_devnum,
if (level == 0) {
int max;
- /* super/high speed reserves 80%, full/low reserves 90% */
- if (usbdev->speed == USB_SPEED_HIGH ||
- usbdev->speed == USB_SPEED_SUPER)
+ /* high speed reserves 80%, full/low reserves 90% */
+ if (usbdev->speed == USB_SPEED_HIGH)
max = 800;
else
max = FRAME_TIME_MAX_USECS_ALLOC;
if (!hcd)
return;
- if (hcd->driver->shutdown) {
+ if (hcd->driver->shutdown)
hcd->driver->shutdown(hcd);
- pci_disable_device(dev);
- }
}
EXPORT_SYMBOL_GPL(usb_hcd_pci_shutdown);
static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
{
struct usb_device *hdev = hub->hdev;
- struct usb_hcd *hcd;
- int ret;
int port1;
int status;
bool need_debounce_delay = false;
atomic_set(&to_usb_interface(hub->intfdev)->
pm_usage_cnt, 1);
return; /* Continues at init2: below */
- } else if (type == HUB_RESET_RESUME) {
- /* The internal host controller state for the hub device
- * may be gone after a host power loss on system resume.
- * Update the device's info so the HW knows it's a hub.
- */
- hcd = bus_to_hcd(hdev->bus);
- if (hcd->driver->update_hub_device) {
- ret = hcd->driver->update_hub_device(hcd, hdev,
- &hub->tt, GFP_NOIO);
- if (ret < 0) {
- dev_err(hub->intfdev, "Host not "
- "accepting hub info "
- "update.\n");
- dev_err(hub->intfdev, "LS/FS devices "
- "and hubs may not work "
- "under this hub\n.");
- }
- }
- hub_power_on(hub, true);
} else {
hub_power_on(hub, true);
}
|| new_state == USB_STATE_SUSPENDED)
; /* No change to wakeup settings */
else if (new_state == USB_STATE_CONFIGURED)
- device_set_wakeup_capable(&udev->dev,
+ device_init_wakeup(&udev->dev,
(udev->actconfig->desc.bmAttributes
& USB_CONFIG_ATT_WAKEUP));
else
- device_set_wakeup_capable(&udev->dev, 0);
+ device_init_wakeup(&udev->dev, 0);
}
if (udev->state == USB_STATE_SUSPENDED &&
new_state != USB_STATE_SUSPENDED)
{
int err;
- if (udev->parent) {
- /* Increment the parent's count of unsuspended children */
+ /* Increment the parent's count of unsuspended children */
+ if (udev->parent)
usb_autoresume_device(udev->parent);
- /* Initialize non-root-hub device wakeup to disabled;
- * device (un)configuration controls wakeup capable
- * sysfs power/wakeup controls wakeup enabled/disabled
- */
- device_init_wakeup(&udev->dev, 0);
- }
-
err = usb_enumerate_device(udev); /* Read descriptors */
if (err < 0)
goto fail;
udev->ttport = hdev->ttport;
} else if (udev->speed != USB_SPEED_HIGH
&& hdev->speed == USB_SPEED_HIGH) {
-
- /* yk@rk 20110617
- * parent hub has no TT would not be error in rk29
- */
- #if 0
- if (!hub->tt.hub) {
- dev_err(&udev->dev, "parent hub has no TT\n");
- retval = -EINVAL;
- goto fail;
- }
- #endif
udev->tt = &hub->tt;
udev->ttport = port1;
}
{ USB_DEVICE(0x04b4, 0x0526), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
- /* Samsung Android phone modem - ID conflict with SPH-I500 */
- { USB_DEVICE(0x04e8, 0x6601), .driver_info =
- USB_QUIRK_CONFIG_INTF_STRINGS },
-
/* Roland SC-8820 */
{ USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
/* M-Systems Flash Disk Pioneers */
{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
- /* Keytouch QWERTY Panel keyboard */
- { USB_DEVICE(0x0926, 0x3333), .driver_info =
- USB_QUIRK_CONFIG_INTF_STRINGS },
-
/* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
{ USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
* parameters are in UTF-8 (superset of ASCII's 7 bit characters).
*/
-static ushort idVendor;
+static ushort __initdata idVendor;
module_param(idVendor, ushort, S_IRUGO);
MODULE_PARM_DESC(idVendor, "USB Vendor ID");
-static ushort idProduct;
+static ushort __initdata idProduct;
module_param(idProduct, ushort, S_IRUGO);
MODULE_PARM_DESC(idProduct, "USB Product ID");
-static ushort bcdDevice;
+static ushort __initdata bcdDevice;
module_param(bcdDevice, ushort, S_IRUGO);
MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)");
-static char *iManufacturer;
+static char *__initdata iManufacturer;
module_param(iManufacturer, charp, S_IRUGO);
MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string");
-static char *iProduct;
+static char *__initdata iProduct;
module_param(iProduct, charp, S_IRUGO);
MODULE_PARM_DESC(iProduct, "USB Product string");
-static char *iSerialNum;
+static char *__initdata iSerialNum;
module_param(iSerialNum, charp, S_IRUGO);
MODULE_PARM_DESC(iSerialNum, "1");
-static char *iPNPstring;
+static char *__initdata iPNPstring;
module_param(iPNPstring, charp, S_IRUGO);
MODULE_PARM_DESC(iPNPstring, "MFG:linux;MDL:g_printer;CLS:PRINTER;SN:1;");
#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
-/* for ASPM quirk of ISOC on AMD SB800 */
-static struct pci_dev *amd_nb_dev;
-
/*-------------------------------------------------------------------------*/
#include "ehci.h"
spin_unlock_irq (&ehci->lock);
ehci_mem_cleanup (ehci);
- if (amd_nb_dev) {
- pci_dev_put(amd_nb_dev);
- amd_nb_dev = NULL;
- }
-
#ifdef EHCI_STATS
ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
ehci->iaa_watchdog.function = ehci_iaa_watchdog;
ehci->iaa_watchdog.data = (unsigned long) ehci;
- hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
-
/*
* hw default: 1K periodic list heads, one per frame.
* periodic_size can shrink by USBCMD update if hcc_params allows.
ehci->periodic_size = DEFAULT_I_TDPS;
INIT_LIST_HEAD(&ehci->cached_itd_list);
INIT_LIST_HEAD(&ehci->cached_sitd_list);
-
- if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
- /* periodic schedule size can be smaller than default */
- switch (EHCI_TUNE_FLS) {
- case 0: ehci->periodic_size = 1024; break;
- case 1: ehci->periodic_size = 512; break;
- case 2: ehci->periodic_size = 256; break;
- default: BUG();
- }
- }
if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
return retval;
/* controllers may cache some of the periodic schedule ... */
+ hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
if (HCC_ISOC_CACHE(hcc_params)) // full frame cache
ehci->i_thresh = 8;
else // N microframes cached
/* periodic schedule size can be smaller than default */
temp &= ~(3 << 2);
temp |= (EHCI_TUNE_FLS << 2);
+ switch (EHCI_TUNE_FLS) {
+ case 0: ehci->periodic_size = 1024; break;
+ case 1: ehci->periodic_size = 512; break;
+ case 2: ehci->periodic_size = 256; break;
+ default: BUG();
+ }
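+ /* EHCI_TUNE_FLS is also written into the USBCMD Frame List Size field
+ * (bits 3:2) just above; the spec defines 0/1/2 there as 1024/512/256
+ * frame-list elements, so periodic_size is kept in step with the list
+ * length the controller will actually walk. */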
}
ehci->command = temp;
return 0;
}
-static int ehci_quirk_amd_hudson(struct ehci_hcd *ehci)
-{
- struct pci_dev *amd_smbus_dev;
- u8 rev = 0;
-
- amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
- if (amd_smbus_dev) {
- pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
- if (rev < 0x40) {
- pci_dev_put(amd_smbus_dev);
- amd_smbus_dev = NULL;
- return 0;
- }
- } else {
- amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x780b, NULL);
- if (!amd_smbus_dev)
- return 0;
- pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
- if (rev < 0x11 || rev > 0x18) {
- pci_dev_put(amd_smbus_dev);
- amd_smbus_dev = NULL;
- return 0;
- }
- }
-
- if (!amd_nb_dev)
- amd_nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
-
- ehci_info(ehci, "QUIRK: Enable exception for AMD Hudson ASPM\n");
-
- pci_dev_put(amd_smbus_dev);
- amd_smbus_dev = NULL;
-
- return 1;
-}
-
/* called during probe() after chip reset completes */
static int ehci_pci_setup(struct usb_hcd *hcd)
{
/* cache this readonly data; minimize chip reads */
ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
- if (ehci_quirk_amd_hudson(ehci))
- ehci->amd_l1_fix = 1;
-
retval = ehci_halt(ehci);
if (retval)
return retval;
int stopped;
unsigned count = 0;
u8 state;
+ const __le32 halt = HALT_BIT(ehci);
struct ehci_qh_hw *hw = qh->hw;
if (unlikely (list_empty (&qh->qtd_list)))
&& !(qtd->hw_alt_next
& EHCI_LIST_END(ehci))) {
stopped = 1;
+ goto halt;
}
/* stop scanning when we reach qtds the hc is using */
*/
ehci_clear_tt_buffer(ehci, qh, urb, token);
}
+
+ /* force halt for unlinked or blocked qh, so we'll
+ * patch the qh later and so that completions can't
+ * activate it while we "know" it's stopped.
+ */
+ if ((halt & hw->hw_token) == 0) {
+halt:
+ hw->hw_token |= halt;
+ wmb ();
+ }
}
/* unless we already know the urb's status, collect qtd status
static void scan_async (struct ehci_hcd *ehci)
{
- bool stopped;
struct ehci_qh *qh;
enum ehci_timer_action action = TIMER_IO_WATCHDOG;
ehci->stamp = ehci_readl(ehci, &ehci->regs->frame_index);
timer_action_done (ehci, TIMER_ASYNC_SHRINK);
rescan:
- stopped = !HC_IS_RUNNING(ehci_to_hcd(ehci)->state);
qh = ehci->async->qh_next.qh;
if (likely (qh != NULL)) {
do {
/* clean any finished work for this qh */
- if (!list_empty(&qh->qtd_list) && (stopped ||
- qh->stamp != ehci->stamp)) {
+ if (!list_empty (&qh->qtd_list)
+ && qh->stamp != ehci->stamp) {
int temp;
/* unlinks could happen here; completion
* reporting drops the lock. rescan using
* the latest schedule, but don't rescan
- * qhs we already finished (no looping)
- * unless the controller is stopped.
+ * qhs we already finished (no looping).
*/
qh = qh_get (qh);
qh->stamp = ehci->stamp;
*/
if (list_empty(&qh->qtd_list)
&& qh->qh_state == QH_STATE_LINKED) {
- if (!ehci->reclaim && (stopped ||
- ((ehci->stamp - qh->stamp) & 0x1fff)
- >= EHCI_SHRINK_FRAMES * 8))
+ if (!ehci->reclaim
+ && ((ehci->stamp - qh->stamp) & 0x1fff)
+ >= (EHCI_SHRINK_FRAMES * 8))
start_unlink_async(ehci, qh);
else
action = TIMER_ASYNC_SHRINK;
*hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
}
-#define AB_REG_BAR_LOW 0xe0
-#define AB_REG_BAR_HIGH 0xe1
-#define AB_INDX(addr) ((addr) + 0x00)
-#define AB_DATA(addr) ((addr) + 0x04)
-#define NB_PCIE_INDX_ADDR 0xe0
-#define NB_PCIE_INDX_DATA 0xe4
-#define NB_PIF0_PWRDOWN_0 0x01100012
-#define NB_PIF0_PWRDOWN_1 0x01100013
-
-static void ehci_quirk_amd_L1(struct ehci_hcd *ehci, int disable)
-{
- u32 addr, addr_low, addr_high, val;
-
- outb_p(AB_REG_BAR_LOW, 0xcd6);
- addr_low = inb_p(0xcd7);
- outb_p(AB_REG_BAR_HIGH, 0xcd6);
- addr_high = inb_p(0xcd7);
- addr = addr_high << 8 | addr_low;
- outl_p(0x30, AB_INDX(addr));
- outl_p(0x40, AB_DATA(addr));
- outl_p(0x34, AB_INDX(addr));
- val = inl_p(AB_DATA(addr));
-
- if (disable) {
- val &= ~0x8;
- val |= (1 << 4) | (1 << 9);
- } else {
- val |= 0x8;
- val &= ~((1 << 4) | (1 << 9));
- }
- outl_p(val, AB_DATA(addr));
-
- if (amd_nb_dev) {
- addr = NB_PIF0_PWRDOWN_0;
- pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_ADDR, addr);
- pci_read_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, &val);
- if (disable)
- val &= ~(0x3f << 7);
- else
- val |= 0x3f << 7;
-
- pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, val);
-
- addr = NB_PIF0_PWRDOWN_1;
- pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_ADDR, addr);
- pci_read_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, &val);
- if (disable)
- val &= ~(0x3f << 7);
- else
- val |= 0x3f << 7;
-
- pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, val);
- }
-
- return;
-}
-
/* fit urb's itds into the selected schedule slot; activate as needed */
static int
itd_link_urb (
next_uframe >> 3, next_uframe & 0x7);
stream->start = jiffies;
}
-
- if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
- if (ehci->amd_l1_fix == 1)
- ehci_quirk_amd_L1(ehci, 1);
- }
-
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
/* fill iTDs uframe by uframe */
(void) disable_periodic(ehci);
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
- if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
- if (ehci->amd_l1_fix == 1)
- ehci_quirk_amd_L1(ehci, 0);
- }
-
if (unlikely(list_is_singular(&stream->td_list))) {
ehci_to_hcd(ehci)->self.bandwidth_allocated
-= stream->bandwidth;
stream->interval, hc32_to_cpu(ehci, stream->splits));
stream->start = jiffies;
}
-
- if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
- if (ehci->amd_l1_fix == 1)
- ehci_quirk_amd_L1(ehci, 1);
- }
-
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
/* fill sITDs frame by frame */
(void) disable_periodic(ehci);
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
- if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
- if (ehci->amd_l1_fix == 1)
- ehci_quirk_amd_L1(ehci, 0);
- }
-
if (list_is_singular(&stream->td_list)) {
ehci_to_hcd(ehci)->self.bandwidth_allocated
-= stream->bandwidth;
unsigned has_amcc_usb23:1;
unsigned need_io_watchdog:1;
unsigned broken_periodic:1;
- unsigned amd_l1_fix:1;
/* required for usb32 quirk */
#define OHCI_CTRL_HCFS (3 << 6)
ohci = hcd_to_ohci (hcd);
ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
- ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
-
- /* If the SHUTDOWN quirk is set, don't put the controller in RESET */
- ohci->hc_control &= (ohci->flags & OHCI_QUIRK_SHUTDOWN ?
- OHCI_CTRL_RWC | OHCI_CTRL_HCFS :
- OHCI_CTRL_RWC);
- ohci_writel(ohci, ohci->hc_control, &ohci->regs->control);
-
+ ohci_usb_reset (ohci);
/* flush the writes */
(void) ohci_readl (ohci, &ohci->regs->control);
}
return 0;
}
-/* nVidia controllers continue to drive Reset signalling on the bus
- * even after system shutdown, wasting power. This flag tells the
- * shutdown routine to leave the controller OPERATIONAL instead of RESET.
- */
-static int ohci_quirk_nvidia_shutdown(struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
-
- ohci->flags |= OHCI_QUIRK_SHUTDOWN;
- ohci_dbg(ohci, "enabled nVidia shutdown quirk\n");
-
- return 0;
-}
-
/*
* The hardware normally enables the A-link power management feature, which
* lets the system lower the power consumption in idle states.
PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399),
.driver_data = (unsigned long)ohci_quirk_amd700,
},
- {
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
- .driver_data = (unsigned long) ohci_quirk_nvidia_shutdown,
- },
/* FIXME for some of the early AMD 760 southbridges, OHCI
* won't work at all. blacklist them.
#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */
#define OHCI_QUIRK_AMD_ISO 0x200 /* ISO transfers*/
#define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */
-#define OHCI_QUIRK_SHUTDOWN 0x800 /* nVidia power bug */
// there are also chip quirks/bugs in init logic
struct work_struct nec_work; /* Worker for NEC quirk */
static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
{
void __iomem *base;
- u32 control;
if (!mmio_resource_enabled(pdev, 0))
return;
if (base == NULL)
return;
- control = readl(base + OHCI_CONTROL);
-
/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
-#ifdef __hppa__
-#define OHCI_CTRL_MASK (OHCI_CTRL_RWC | OHCI_CTRL_IR)
-#else
-#define OHCI_CTRL_MASK OHCI_CTRL_RWC
-
+#ifndef __hppa__
+{
+ u32 control = readl(base + OHCI_CONTROL);
if (control & OHCI_CTRL_IR) {
int wait_time = 500; /* arbitrary; 5 seconds */
writel(OHCI_INTR_OC, base + OHCI_INTRENABLE);
dev_warn(&pdev->dev, "OHCI: BIOS handoff failed"
" (BIOS bug?) %08x\n",
readl(base + OHCI_CONTROL));
+
+ /* reset controller, preserving RWC */
+ writel(control & OHCI_CTRL_RWC, base + OHCI_CONTROL);
}
+}
#endif
- /* reset controller, preserving RWC (and possibly IR) */
- writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
-
/*
* disable interrupts
*/
return 0;
}
-/*
- * Convert interval expressed as 2^(bInterval - 1) == interval into
- * straight exponent value 2^n == interval.
- *
- */
-static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
- struct usb_host_endpoint *ep)
-{
- unsigned int interval;
-
- interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
- if (interval != ep->desc.bInterval - 1)
- dev_warn(&udev->dev,
- "ep %#x - rounding interval to %d microframes\n",
- ep->desc.bEndpointAddress,
- 1 << interval);
-
- return interval;
-}
-
-/*
- * Convert bInterval expressed in frames (in 1-255 range) to exponent of
- * microframes, rounded down to nearest power of 2.
- */
-static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
- struct usb_host_endpoint *ep)
-{
- unsigned int interval;
-
- interval = fls(8 * ep->desc.bInterval) - 1;
- interval = clamp_val(interval, 3, 10);
- if ((1 << interval) != 8 * ep->desc.bInterval)
- dev_warn(&udev->dev,
- "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
- ep->desc.bEndpointAddress,
- 1 << interval,
- 8 * ep->desc.bInterval);
-
- return interval;
-}
-
/* Return the polling or NAK interval.
*
* The polling interval is expressed in "microframes". If xHCI's Interval field
case USB_SPEED_HIGH:
/* Max NAK rate */
if (usb_endpoint_xfer_control(&ep->desc) ||
- usb_endpoint_xfer_bulk(&ep->desc)) {
+ usb_endpoint_xfer_bulk(&ep->desc))
interval = ep->desc.bInterval;
- break;
- }
/* Fall through - SS and HS isoc/int have same decoding */
-
case USB_SPEED_SUPER:
if (usb_endpoint_xfer_int(&ep->desc) ||
- usb_endpoint_xfer_isoc(&ep->desc)) {
- interval = xhci_parse_exponent_interval(udev, ep);
+ usb_endpoint_xfer_isoc(&ep->desc)) {
+ if (ep->desc.bInterval == 0)
+ interval = 0;
+ else
+ interval = ep->desc.bInterval - 1;
+ if (interval > 15)
+ interval = 15;
+ if (interval != ep->desc.bInterval + 1)
+ dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
+ ep->desc.bEndpointAddress, 1 << interval);
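+ /* e.g. a high-speed interrupt endpoint with bInterval = 4 is
+ * programmed with interval 3, i.e. a period of 2^3 = 8
+ * microframes (1 ms). */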
}
break;
-
+ /* Convert bInterval (in 1-255 frames) to microframes and round down to
+ * nearest power of 2.
+ */
case USB_SPEED_FULL:
- if (usb_endpoint_xfer_int(&ep->desc)) {
- interval = xhci_parse_exponent_interval(udev, ep);
- break;
- }
- /*
- * Fall through for isochronous endpoint interval decoding
- * since it uses the same rules as low speed interrupt
- * endpoints.
- */
-
case USB_SPEED_LOW:
if (usb_endpoint_xfer_int(&ep->desc) ||
- usb_endpoint_xfer_isoc(&ep->desc)) {
-
- interval = xhci_parse_frame_interval(udev, ep);
+ usb_endpoint_xfer_isoc(&ep->desc)) {
+ interval = fls(8*ep->desc.bInterval) - 1;
+ if (interval > 10)
+ interval = 10;
+ if (interval < 3)
+ interval = 3;
+ if ((1 << interval) != 8*ep->desc.bInterval)
+ dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
+ ep->desc.bEndpointAddress, 1 << interval);
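+ /* e.g. a full-speed interrupt endpoint with bInterval = 10 frames is
+ * 8 * 10 = 80 microframes; fls(80) - 1 = 6, so the xHC is told
+ * 2^6 = 64 microframes (8 ms) and the warning above reports the
+ * rounding. */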
}
break;
-
default:
BUG();
}
state->new_deq_seg = find_trb_seg(cur_td->start_seg,
dev->eps[ep_index].stopped_trb,
&state->new_cycle_state);
- if (!state->new_deq_seg) {
- WARN_ON(1);
- return;
- }
-
+ if (!state->new_deq_seg)
+ BUG();
/* Dig out the cycle state saved by the xHC during the stop ep cmd */
xhci_dbg(xhci, "Finding endpoint context\n");
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
state->new_deq_seg = find_trb_seg(state->new_deq_seg,
state->new_deq_ptr,
&state->new_cycle_state);
- if (!state->new_deq_seg) {
- WARN_ON(1);
- return;
- }
+ if (!state->new_deq_seg)
+ BUG();
trb = &state->new_deq_ptr->generic;
if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
- /*
- * If there is only one segment in a ring, find_trb_seg()'s while loop
- * will not run, and it will return before it has a chance to see if it
- * needs to toggle the cycle bit. It can't tell if the stalled transfer
- * ended just before the link TRB on a one-segment ring, or if the TD
- * wrapped around the top of the ring, because it doesn't have the TD in
- * question. Look for the one-segment case where stalled TRB's address
- * is greater than the new dequeue pointer address.
- */
- if (ep_ring->first_seg == ep_ring->first_seg->next &&
- state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
- state->new_cycle_state ^= 0x1;
- xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state);
-
/* Don't update the ring cycle state for the producer (us). */
xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
state->new_deq_seg);
/* Scatter gather list entries may cross 64KB boundaries */
running_total = TRB_MAX_BUFF_SIZE -
- (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
- running_total &= TRB_MAX_BUFF_SIZE - 1;
+ (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
if (running_total != 0)
num_trbs++;
/* How many more 64KB chunks to transfer, how many more TRBs? */
- while (running_total < sg_dma_len(sg) && running_total < temp) {
+ while (running_total < sg_dma_len(sg)) {
num_trbs++;
running_total += TRB_MAX_BUFF_SIZE;
}
static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
if (num_trbs != 0)
- dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
+ dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
"TRBs, %d left\n", __func__,
urb->ep->desc.bEndpointAddress, num_trbs);
if (running_total != urb->transfer_buffer_length)
- dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
+ dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
"queued %#x (%d), asked for %#x (%d)\n",
__func__,
urb->ep->desc.bEndpointAddress,
sg = urb->sg->sg;
addr = (u64) sg_dma_address(sg);
this_sg_len = sg_dma_len(sg);
- trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
+ trb_buff_len = TRB_MAX_BUFF_SIZE -
+ (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (trb_buff_len > urb->transfer_buffer_length)
trb_buff_len = urb->transfer_buffer_length;
(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
(unsigned int) addr + trb_buff_len);
if (TRB_MAX_BUFF_SIZE -
- (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
+ (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
}
trb_buff_len = TRB_MAX_BUFF_SIZE -
- (addr & (TRB_MAX_BUFF_SIZE - 1));
+ (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (running_total + trb_buff_len > urb->transfer_buffer_length)
trb_buff_len =
num_trbs = 0;
/* How much data is (potentially) left before the 64KB boundary? */
running_total = TRB_MAX_BUFF_SIZE -
- (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
- running_total &= TRB_MAX_BUFF_SIZE - 1;
+ (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
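+ /* e.g. with 64KB TRB segments, a transfer_dma whose low 16 bits are
+ * 0xf000 leaves 0x10000 - 0xf000 = 0x1000 bytes before the boundary,
+ * so running_total starts out at 4096 here. */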
/* If there's some data on this 64KB chunk, or we have to send a
* zero-length transfer, we need at least one TRB
/* How much data is in the first TRB? */
addr = (u64) urb->transfer_dma;
trb_buff_len = TRB_MAX_BUFF_SIZE -
- (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
- if (trb_buff_len > urb->transfer_buffer_length)
+ (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ if (urb->transfer_buffer_length < trb_buff_len)
trb_buff_len = urb->transfer_buffer_length;
first_trb = true;
* notification type that matches a bit set in this bit field.
*/
#define DEV_NOTE_MASK (0xffff)
-#define ENABLE_DEV_NOTE(x) (1 << (x))
+#define ENABLE_DEV_NOTE(x) (1 << x)
/* Most of the device notification types should only be used for debug.
* SW does need to pay attention to function wake notifications.
*/
#define EP_STATE_STOPPED 3
#define EP_STATE_ERROR 4
/* Mult - Max number of burtst within an interval, in EP companion desc. */
-#define EP_MULT(p) (((p) & 0x3) << 8)
+#define EP_MULT(p) ((p & 0x3) << 8)
/* bits 10:14 are Max Primary Streams */
/* bit 15 is Linear Stream Array */
/* Interval - period between requests to an endpoint - 125u increments. */
-#define EP_INTERVAL(p) (((p) & 0xff) << 16)
+#define EP_INTERVAL(p) ((p & 0xff) << 16)
#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff))
/* ep_info2 bitmasks */
case USB_DEVICE_ID_CODEMERCS_IOWPV2:
case USB_DEVICE_ID_CODEMERCS_IOW40:
/* IOW24 and IOW40 use a synchronous call */
- buf = kmalloc(count, GFP_KERNEL);
+ buf = kmalloc(8, GFP_KERNEL); /* 8 bytes are enough for both products */
if (!buf) {
retval = -ENOMEM;
goto exit;
spin_lock_irqsave(&priv->asynclock, flags);
list_add_tail(&rq->asynclist, &priv->asynclist);
spin_unlock_irqrestore(&priv->asynclock, flags);
- kref_get(&rq->ref_count);
ret = usb_submit_urb(rq->urb, mem_flags);
- if (!ret)
+ if (!ret) {
+ kref_get(&rq->ref_count);
return rq;
- destroy_async(&rq->ref_count);
+ }
+ kref_put(&rq->ref_count, destroy_async);
err("submit_async_request submit_urb failed with %d", ret);
return NULL;
}
INIT_LIST_HEAD(&musb->out_bulk);
hcd->uses_new_polling = 1;
- hcd->has_tt = 1;
musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
int musb_platform_exit(struct musb *musb)
{
- del_timer_sync(&musb_idle_timer);
omap_vbus_power(musb, 0 /*off*/, 1);
static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x4348, 0x5523) },
{ USB_DEVICE(0x1a86, 0x7523) },
- { USB_DEVICE(0x1a86, 0x5523) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
if (actual_length >= 4) {
struct ch341_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
- u8 prev_line_status = priv->line_status;
spin_lock_irqsave(&priv->lock, flags);
priv->line_status = (~(data[2])) & CH341_BITS_MODEM_STAT;
if ((data[1] & CH341_MULT_STAT))
priv->multi_status_change = 1;
spin_unlock_irqrestore(&priv->lock, flags);
-
- if ((priv->line_status ^ prev_line_status) & CH341_BIT_DCD) {
- struct tty_struct *tty = tty_port_tty_get(&port->port);
- if (tty)
- usb_serial_handle_dcd_change(port, tty,
- priv->line_status & CH341_BIT_DCD);
- tty_kref_put(tty);
- }
-
wake_up_interruptible(&priv->delta_msr_wait);
}
static int cp210x_startup(struct usb_serial *);
static void cp210x_disconnect(struct usb_serial *);
static void cp210x_dtr_rts(struct usb_serial_port *p, int on);
+static int cp210x_carrier_raised(struct usb_serial_port *p);
static int debug;
{ USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
{ USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
{ USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */
+ { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
{ USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
{ USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
{ USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
- { USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
- { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
.tiocmset = cp210x_tiocmset,
.attach = cp210x_startup,
.disconnect = cp210x_disconnect,
- .dtr_rts = cp210x_dtr_rts
+ .dtr_rts = cp210x_dtr_rts,
+ .carrier_raised = cp210x_carrier_raised
};
/* Config request types */
return result;
}
+static int cp210x_carrier_raised(struct usb_serial_port *p)
+{
+ unsigned int control;
+ cp210x_get_config(p, CP210X_GET_MDMSTS, &control, 1);
+ if (control & CONTROL_DCD)
+ return 1;
+ return 0;
+}
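+/* With a carrier_raised hook in place, a blocking open on the port now
+ * waits in the tty layer until this returns non-zero (unless CLOCAL or
+ * O_NONBLOCK is set), so DCD from CP210X_GET_MDMSTS gates open(). */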
+
static void cp210x_break_ctl (struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
static int digi_chars_in_buffer(struct tty_struct *tty);
static int digi_open(struct tty_struct *tty, struct usb_serial_port *port);
static void digi_close(struct usb_serial_port *port);
+static int digi_carrier_raised(struct usb_serial_port *port);
static void digi_dtr_rts(struct usb_serial_port *port, int on);
static int digi_startup_device(struct usb_serial *serial);
static int digi_startup(struct usb_serial *serial);
.open = digi_open,
.close = digi_close,
.dtr_rts = digi_dtr_rts,
+ .carrier_raised = digi_carrier_raised,
.write = digi_write,
.write_room = digi_write_room,
.write_bulk_callback = digi_write_bulk_callback,
digi_set_modem_signals(port, on * (TIOCM_DTR|TIOCM_RTS), 1);
}
+static int digi_carrier_raised(struct usb_serial_port *port)
+{
+ struct digi_port *priv = usb_get_serial_port_data(port);
+ if (priv->dp_modem_signals & TIOCM_CD)
+ return 1;
+ return 0;
+}
+
static int digi_open(struct tty_struct *tty, struct usb_serial_port *port)
{
int ret;
static int ftdi_jtag_probe(struct usb_serial *serial);
static int ftdi_mtxorb_hack_setup(struct usb_serial *serial);
static int ftdi_NDI_device_setup(struct usb_serial *serial);
-static int ftdi_stmclite_probe(struct usb_serial *serial);
static void ftdi_USB_UIRT_setup(struct ftdi_private *priv);
static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv);
.port_probe = ftdi_HE_TIRA1_setup,
};
-static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
- .probe = ftdi_stmclite_probe,
-};
-
/*
* The 8U232AM has the same API as the sio except for:
* - it can support MUCH higher baudrates; up to:
* /sys/bus/usb/ftdi_sio/new_id, then send patch/report!
*/
static struct usb_device_id id_table_combined [] = {
- { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) },
- { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_8_PID) },
{ USB_DEVICE(IDTECH_VID, IDTECH_IDT1221U_PID) },
{ USB_DEVICE(OCT_VID, OCT_US101_PID) },
- { USB_DEVICE(OCT_VID, OCT_DK201_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_HE_TIRA1_PID),
.driver_info = (kernel_ulong_t)&ftdi_HE_TIRA1_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_USB_UIRT_PID),
{ USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) },
{ USB_DEVICE(TTI_VID, TTI_QL355P_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
- { USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) },
{ USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) },
- { USB_DEVICE(ICOM_VID, ICOM_ID_1_PID) },
- { USB_DEVICE(ICOM_VID, ICOM_OPC_U_UC_PID) },
- { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C1_PID) },
- { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C2_PID) },
- { USB_DEVICE(ICOM_VID, ICOM_ID_RP2D_PID) },
- { USB_DEVICE(ICOM_VID, ICOM_ID_RP2VT_PID) },
- { USB_DEVICE(ICOM_VID, ICOM_ID_RP2VR_PID) },
- { USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVT_PID) },
- { USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVR_PID) },
- { USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVT_PID) },
- { USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVR_PID) },
+ { USB_DEVICE(ICOM_ID1_VID, ICOM_ID1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) },
{ USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) },
- { USB_DEVICE(FTDI_VID, HAMEG_HO720_PID) },
- { USB_DEVICE(FTDI_VID, HAMEG_HO730_PID) },
{ USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) },
{ USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) },
{ USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
{ USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
- { USB_DEVICE(ST_VID, ST_STMCLT1030_PID),
- .driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
return 0;
}
-/*
- * First and second port on STMCLiteadaptors is reserved for JTAG interface
- * and the forth port for pio
- */
-static int ftdi_stmclite_probe(struct usb_serial *serial)
-{
- struct usb_device *udev = serial->dev;
- struct usb_interface *interface = serial->interface;
-
- dbg("%s", __func__);
-
- if (interface == udev->actconfig->interface[2])
- return 0;
-
- dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n");
-
- return -ENODEV;
-}
-
/*
* The Matrix Orbital VK204-25-USB has an invalid IN endpoint.
* We have to correct it if we want to read from it.
* Hameg HO820 and HO870 interface (using VID 0x0403)
*/
#define HAMEG_HO820_PID 0xed74
-#define HAMEG_HO730_PID 0xed73
-#define HAMEG_HO720_PID 0xed72
#define HAMEG_HO870_PID 0xed71
/*
#define RATOC_VENDOR_ID 0x0584
#define RATOC_PRODUCT_ID_USB60F 0xb020
-/*
- * Acton Research Corp.
- */
-#define ACTON_VID 0x0647 /* Vendor ID */
-#define ACTON_SPECTRAPRO_PID 0x0100
-
/*
* Contec products (http://www.contec.com)
* Submitted by Daniel Sangorrin
/* Note: OCT US101 is also rebadged as Dick Smith Electronics (NZ) XH6381 */
/* Also rebadged as Dick Smith Electronics (Aus) XH6451 */
/* Also rebadged as SIIG Inc. model US2308 hardware version 1 */
-#define OCT_DK201_PID 0x0103 /* OCT DK201 USB docking station */
#define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */
/*
- * Definitions for Icom Inc. devices
+ * Icom ID-1 digital transceiver
*/
-#define ICOM_VID 0x0C26 /* Icom vendor ID */
-/* Note: ID-1 is a communications tranceiver for HAM-radio operators */
-#define ICOM_ID_1_PID 0x0004 /* ID-1 USB to RS-232 */
-/* Note: OPC is an Optional cable to connect an Icom Tranceiver */
-#define ICOM_OPC_U_UC_PID 0x0018 /* OPC-478UC, OPC-1122U cloning cable */
-/* Note: ID-RP* devices are Icom Repeater Devices for HAM-radio */
-#define ICOM_ID_RP2C1_PID 0x0009 /* ID-RP2C Asset 1 to RS-232 */
-#define ICOM_ID_RP2C2_PID 0x000A /* ID-RP2C Asset 2 to RS-232 */
-#define ICOM_ID_RP2D_PID 0x000B /* ID-RP2D configuration port*/
-#define ICOM_ID_RP2VT_PID 0x000C /* ID-RP2V Transmit config port */
-#define ICOM_ID_RP2VR_PID 0x000D /* ID-RP2V Receive config port */
-#define ICOM_ID_RP4KVT_PID 0x0010 /* ID-RP4000V Transmit config port */
-#define ICOM_ID_RP4KVR_PID 0x0011 /* ID-RP4000V Receive config port */
-#define ICOM_ID_RP2KVT_PID 0x0012 /* ID-RP2000V Transmit config port */
-#define ICOM_ID_RP2KVR_PID 0x0013 /* ID-RP2000V Receive config port */
+
+#define ICOM_ID1_VID 0x0C26
+#define ICOM_ID1_PID 0x0004
/*
* GN Otometrics (http://www.otometrics.com)
#define STB_PID 0x0001 /* Sensor Terminal Board */
#define WHT_PID 0x0004 /* Wireless Handheld Terminal */
-/*
- * STMicroelectonics
- */
-#define ST_VID 0x0483
-#define ST_STMCLT1030_PID 0x3747 /* ST Micro Connect Lite STMCLT1030 */
-
/*
* Papouch products (http://www.papouch.com/)
* Submitted by Folkert van Heusden
#define QIHARDWARE_VID 0x20B7
#define MILKYMISTONE_JTAGSERIAL_PID 0x0713
-/*
- * CTI GmbH RS485 Converter http://www.cti-lean.com/
- */
-/* USB-485-Mini*/
-#define FTDI_CTI_MINI_PID 0xF608
-/* USB-Nano-485*/
-#define FTDI_CTI_NANO_PID 0xF60B
-
-
}
EXPORT_SYMBOL_GPL(usb_serial_handle_break);
-/**
- * usb_serial_handle_dcd_change - handle a change of carrier detect state
- * @port: usb_serial_port structure for the open port
- * @tty: tty_struct structure for the port
- * @status: new carrier detect status, nonzero if active
- */
-void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
- struct tty_struct *tty, unsigned int status)
-{
- struct tty_port *port = &usb_port->port;
-
- dbg("%s - port %d, status %d", __func__, usb_port->number, status);
-
- if (status)
- wake_up_interruptible(&port->open_wait);
- else if (tty && !C_CLOCAL(tty))
- tty_hangup(tty);
-}
-EXPORT_SYMBOL_GPL(usb_serial_handle_dcd_change);
-
int usb_serial_generic_resume(struct usb_serial *serial)
{
struct usb_serial_port *port;
dbg("%s %d.%d.%d", fw_info, rec->data[0], rec->data[1], build);
- edge_serial->product_info.FirmwareMajorVersion = rec->data[0];
- edge_serial->product_info.FirmwareMinorVersion = rec->data[1];
+ edge_serial->product_info.FirmwareMajorVersion = fw->data[0];
+ edge_serial->product_info.FirmwareMinorVersion = fw->data[1];
edge_serial->product_info.FirmwareBuildNumber = cpu_to_le16(build);
for (rec = ihex_next_binrec(rec); rec;
}
}
+static int keyspan_pda_carrier_raised(struct usb_serial_port *port)
+{
+ struct usb_serial *serial = port->serial;
+ unsigned char modembits;
+
+ /* If we can read the modem status and the DCD is low then
+ carrier is not raised yet */
+ if (keyspan_pda_get_modem_info(serial, &modembits) >= 0) {
+ if (!(modembits & (1<<6))) /* bit 6 is DCD */
+ return 0;
+ }
+ /* Carrier raised, or we failed (eg disconnected) so
+ progress accordingly */
+ return 1;
+}
+
static int keyspan_pda_open(struct tty_struct *tty,
struct usb_serial_port *port)
.id_table = id_table_std,
.num_ports = 1,
.dtr_rts = keyspan_pda_dtr_rts,
+ .carrier_raised = keyspan_pda_carrier_raised,
.open = keyspan_pda_open,
.close = keyspan_pda_close,
.write = keyspan_pda_write,
}
tty = tty_port_tty_get(&port->port);
- if (tty && urb->actual_length) {
+ if (urb->actual_length) {
/* BEGIN DEBUG */
/*
#define HAIER_VENDOR_ID 0x201e
#define HAIER_PRODUCT_CE100 0x2009
-/* Cinterion (formerly Siemens) products */
-#define SIEMENS_VENDOR_ID 0x0681
-#define CINTERION_VENDOR_ID 0x1e2d
-#define CINTERION_PRODUCT_HC25_MDM 0x0047
-#define CINTERION_PRODUCT_HC25_MDMNET 0x0040
-#define CINTERION_PRODUCT_HC28_MDM 0x004C
-#define CINTERION_PRODUCT_HC28_MDMNET 0x004A /* same for HC28J */
-#define CINTERION_PRODUCT_EU3_E 0x0051
-#define CINTERION_PRODUCT_EU3_P 0x0052
-#define CINTERION_PRODUCT_PH8 0x0053
+/* Thinkwill products */
+#define THINKWILL_VENDOR_ID 0x19f5
+#define THINKWILL_PRODUCT_ID 0x9909
+
+#define CINTERION_VENDOR_ID 0x0681
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
#define CELOT_VENDOR_ID 0x211f
#define CELOT_PRODUCT_CT680M 0x6801
-/* ONDA Communication vendor id */
-#define ONDA_VENDOR_ID 0x1ee8
-
-/* ONDA MT825UP HSDPA 14.2 modem */
-#define ONDA_MT825UP 0x000b
-
-/* Samsung products */
-#define SAMSUNG_VENDOR_ID 0x04e8
-#define SAMSUNG_PRODUCT_GT_B3730 0x6889
-
static struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
- /* Cinterion */
- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
- { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
- { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
- { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
- { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
-
+ { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
+ { USB_DEVICE(LEADCORE_VENDOR_ID, LEADCORE_PRODUCT_LC1808) }, //zzc
+ { USB_DEVICE(SC8800G_VENDOR_ID,SC8800G_PRODUCT_ID)},
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
- { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
- { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730/GT-B3710 LTE USB modem.*/
+
+// cmy:
+ { USB_DEVICE(0x0685, 0x6000) },
+ { USB_DEVICE(0x1E89, 0x1E16) },
+ { USB_DEVICE(0x7693, 0x0001) },
+ { USB_DEVICE(0x1D09, 0x4308) },
+ { USB_DEVICE(0x1234, 0x0033) },
+ { USB_DEVICE(0xFEED, 0x0001) },
+ { USB_DEVICE(ALCATEL_VENDOR_ID, 0x0017) },
+
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ALDIGA) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GPRS) },
- { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
- { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
{
struct pl2303_private *priv = usb_get_serial_port_data(port);
- struct tty_struct *tty;
unsigned long flags;
u8 status_idx = UART_STATE;
u8 length = UART_STATE + 1;
- u8 prev_line_status;
u16 idv, idp;
idv = le16_to_cpu(port->serial->dev->descriptor.idVendor);
/* Save off the uart status for others to look at */
spin_lock_irqsave(&priv->lock, flags);
- prev_line_status = priv->line_status;
priv->line_status = data[status_idx];
spin_unlock_irqrestore(&priv->lock, flags);
if (priv->line_status & UART_BREAK_ERROR)
usb_serial_handle_break(port);
wake_up_interruptible(&priv->delta_msr_wait);
-
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
- if ((priv->line_status ^ prev_line_status) & UART_DCD)
- usb_serial_handle_dcd_change(port, tty,
- priv->line_status & UART_DCD);
- tty_kref_put(tty);
}
static void pl2303_read_int_callback(struct urb *urb)
#define PL2303_PRODUCT_ID_ALDIGA 0x0611
#define PL2303_PRODUCT_ID_MMX 0x0612
#define PL2303_PRODUCT_ID_GPRS 0x0609
-#define PL2303_PRODUCT_ID_HCR331 0x331a
-#define PL2303_PRODUCT_ID_MOTOROLA 0x0307
#define ATEN_VENDOR_ID 0x0557
#define ATEN_VENDOR_ID2 0x0547
{ USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
},
- { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */
- .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
- },
{ }
};
/* how come ??? */
#define UART_STATE 0x08
-#define UART_STATE_TRANSIENT_MASK 0x75
+#define UART_STATE_TRANSIENT_MASK 0x74
#define UART_DCD 0x01
#define UART_DSR 0x02
#define UART_BREAK_ERROR 0x04
tty_insert_flip_char(tty, data[i], tty_flag);
tty_flip_buffer_push(tty);
}
-
- if (status & UART_DCD)
- usb_serial_handle_dcd_change(port, tty,
- priv->line_status & MSR_STATUS_LINE_DCD);
tty_kref_put(tty);
/* Schedule the next read _if_ we are still open */
static void __exit ti_exit(void)
{
- usb_deregister(&ti_usb_driver);
usb_serial_deregister(&ti_1port_device);
usb_serial_deregister(&ti_2port_device);
+ usb_deregister(&ti_usb_driver);
}
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
-#include <linux/usb/cdc.h>
#include "visor.h"
/*
dbg("%s", __func__);
- /*
- * some Samsung Android phones in modem mode have the same ID
- * as SPH-I500, but they are ACM devices, so dont bind to them
- */
- if (id->idVendor == SAMSUNG_VENDOR_ID &&
- id->idProduct == SAMSUNG_SPH_I500_ID &&
- serial->dev->descriptor.bDeviceClass == USB_CLASS_COMM &&
- serial->dev->descriptor.bDeviceSubClass ==
- USB_CDC_SUBCLASS_ACM)
- return -ENODEV;
-
if (serial->dev->actconfig->desc.bConfigurationValue != 1) {
dev_err(&serial->dev->dev, "active config #%d != 1 ??\n",
serial->dev->actconfig->desc.bConfigurationValue);
"Cypress ISD-300LP",
US_SC_CYP_ATACB, US_PR_DEVICE, NULL, 0),
-UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x9999,
- "Super Top",
- "USB 2.0 SATA BRIDGE",
- US_SC_CYP_ATACB, US_PR_DEVICE, NULL, 0),
-
#endif /* defined(CONFIG_USB_STORAGE_CYPRESS_ATACB) || ... */
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_BULK32),
-/* Reported by <ttkspam@free.fr>
- * The device reports a vendor-specific device class, requiring an
- * explicit vendor/product match.
- */
-UNUSUAL_DEV( 0x0851, 0x1542, 0x0002, 0x0002,
- "MagicPixel",
- "FW_Omega2",
- US_SC_DEVICE, US_PR_DEVICE, NULL, 0),
-
/* Andrew Lunn <andrew@lunn.ch>
* PanDigital Digital Picture Frame. Does not like ALLOW_MEDIUM_REMOVAL
* on LUN 4.
US_FL_IGNORE_DEVICE ),
#endif
-/* Submitted by Nick Holloway */
-UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100,
- "VTech",
- "Kidizoom",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_CAPACITY ),
-
/* Reported by Michael Stattmann <michael@stattmann.com> */
UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000,
"Sony Ericsson",
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_BAD_SENSE ),
-/* Patch by Richard Schütz <r.schtz@t-online.de>
- * This external hard drive enclosure uses a JMicron chip which
- * needs the US_FL_IGNORE_RESIDUE flag to work properly. */
-UNUSUAL_DEV( 0x1e68, 0x001b, 0x0000, 0x0000,
- "TrekStor GmbH & Co. KG",
- "DataStation maxi g.u",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
-
-/* Reported by Jasper Mackenzie <scarletpimpernal@hotmail.com> */
-UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000,
- "Coby Electronics",
- "MP3 Player",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
-
UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
"ST",
"2A",
},
.driver_data = (void *)&nvidia_chipset_data,
},
- {
- .callback = mbp_dmi_match,
- .ident = "MacBookAir 3,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,1"),
- },
- .driver_data = (void *)&nvidia_chipset_data,
- },
- {
- .callback = mbp_dmi_match,
- .ident = "MacBookAir 3,2",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,2"),
- },
- .driver_data = (void *)&nvidia_chipset_data,
- },
{ }
};
int softback_lines, int fg, int bg)
{
struct fb_tilecursor cursor;
- int use_sw = (vc->vc_cursor_type & 0x10);
+ int use_sw = (vc->vc_cursor_type & 0x01);
cursor.sx = vc->vc_x;
cursor.sy = vc->vc_y;
MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
+/* A PCI device has its own struct device and so does a virtio device so
+ * we create a place for the virtio devices to show up in sysfs. I think it
+ * would make more sense for virtio to not insist on having its own device. */
+static struct device *virtio_pci_root;
+
/* Convert a generic virtio device to our structure */
static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
{
if (vp_dev == NULL)
return -ENOMEM;
- vp_dev->vdev.dev.parent = &pci_dev->dev;
+ vp_dev->vdev.dev.parent = virtio_pci_root;
vp_dev->vdev.dev.release = virtio_pci_release_dev;
vp_dev->vdev.config = &virtio_pci_config_ops;
vp_dev->pci_dev = pci_dev;
goto out_req_regions;
pci_set_drvdata(pci_dev, vp_dev);
- pci_set_master(pci_dev);
/* we use the subsystem vendor/device id as the virtio vendor/device
* id. this allows us to use the same PCI vendor/device id for all
static int __init virtio_pci_init(void)
{
- return pci_register_driver(&virtio_pci_driver);
+ int err;
+
+ virtio_pci_root = root_device_register("virtio-pci");
+ if (IS_ERR(virtio_pci_root))
+ return PTR_ERR(virtio_pci_root);
+
+ err = pci_register_driver(&virtio_pci_driver);
+ if (err)
+ root_device_unregister(virtio_pci_root);
+
+ return err;
}
module_init(virtio_pci_init);
static void __exit virtio_pci_exit(void)
{
pci_unregister_driver(&virtio_pci_driver);
+ root_device_unregister(virtio_pci_root);
}
module_exit(virtio_pci_exit);
ctx->reqs_active--;
if (unlikely(!ctx->reqs_active && ctx->dead))
- wake_up_all(&ctx->wait);
+ wake_up(&ctx->wait);
}
static void aio_fput_routine(struct work_struct *data)
* by other CPUs at this point. Right now, we rely on the
* locking done by the above calls to ensure this consistency.
*/
- wake_up_all(&ioctx->wait);
+ wake_up(&ioctx->wait);
put_ioctx(ioctx); /* once for the lookup */
}
#define BTRFS_INODE_DIRSYNC (1 << 10)
-#define BTRFS_INODE_ROOT_ITEM_INIT (1 << 31)
-
/* some macros to generate set/get funcs for the struct fields. This
* assumes there is a lefoo_to_cpu for every type, so let's make a simple
* one for u8:
int btrfs_find_orphan_roots(struct btrfs_root *tree_root);
int btrfs_set_root_node(struct btrfs_root_item *item,
struct extent_buffer *node);
-void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
-
/* dir-item.c */
int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root, const char *name,
root->commit_root = btrfs_root_node(root);
BUG_ON(!root->node);
out:
- if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
+ if (location->objectid != BTRFS_TREE_LOG_OBJECTID)
root->ref_cows = 1;
- btrfs_check_and_init_root_item(&root->root_item);
- }
return root;
}
inode_item->nbytes = cpu_to_le64(root->leafsize);
inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
- root_item.flags = 0;
- root_item.byte_limit = 0;
- inode_item->flags = cpu_to_le64(BTRFS_INODE_ROOT_ITEM_INIT);
-
btrfs_set_root_bytenr(&root_item, leaf->start);
btrfs_set_root_generation(&root_item, trans->transid);
btrfs_set_root_level(&root_item, 0);
btrfs_free_path(path);
return 0;
}
-
-/*
- * Old btrfs forgets to init root_item->flags and root_item->byte_limit
- * for subvolumes. To work around this problem, we steal a bit from
- * root_item->inode_item->flags, and use it to indicate if those fields
- * have been properly initialized.
- */
-void btrfs_check_and_init_root_item(struct btrfs_root_item *root_item)
-{
- u64 inode_flags = le64_to_cpu(root_item->inode.flags);
-
- if (!(inode_flags & BTRFS_INODE_ROOT_ITEM_INIT)) {
- inode_flags |= BTRFS_INODE_ROOT_ITEM_INIT;
- root_item->inode.flags = cpu_to_le64(inode_flags);
- root_item->flags = 0;
- root_item->byte_limit = 0;
- }
-}
record_root_in_trans(trans, root);
btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
- btrfs_check_and_init_root_item(new_root_item);
key.objectid = objectid;
/* record when the snapshot was created in key.offset */
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
-#include <linux/capability.h>
#include <asm/div64.h>
#include "compat.h"
#include "ctree.h"
if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
return -EROFS;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
mutex_lock(&dev_root->fs_info->volume_mutex);
dev_root = dev_root->fs_info->dev_root;
const __u16 netfid, const __u64 len,
const __u64 offset, const __u32 numUnlock,
const __u32 numLock, const __u8 lockType,
- const bool waitFlag, const __u8 oplock_level);
+ const bool waitFlag);
extern int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
const __u16 smb_file_id, const int get_flag,
const __u64 len, struct file_lock *,
CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
const __u16 smb_file_id, const __u64 len,
const __u64 offset, const __u32 numUnlock,
- const __u32 numLock, const __u8 lockType,
- const bool waitFlag, const __u8 oplock_level)
+ const __u32 numLock, const __u8 lockType, const bool waitFlag)
{
int rc = 0;
LOCK_REQ *pSMB = NULL;
pSMB->NumberOfLocks = cpu_to_le16(numLock);
pSMB->NumberOfUnlocks = cpu_to_le16(numUnlock);
pSMB->LockType = lockType;
- pSMB->OplockLevel = oplock_level;
pSMB->AndXCommand = 0xFF; /* none */
pSMB->Fid = smb_file_id; /* netfid stays le */
cifs_parse_mount_options(char *options, const char *devname,
struct smb_vol *vol)
{
- char *value, *data, *end;
+ char *value;
+ char *data;
unsigned int temp_len, i, j;
char separator[2];
short int override_uid = -1;
if (!options)
return 1;
- end = options + strlen(options);
if (strncmp(options, "sep=", 4) == 0) {
if (options[4] != 0) {
separator[0] = options[4];
the only illegal character in a password is null */
if ((value[temp_len] == 0) &&
- (value + temp_len < end) &&
(value[temp_len+1] == separator[0])) {
/* reinsert comma */
value[temp_len] = separator[0];
0 /* not legacy */, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
-
- if (rc == -EOPNOTSUPP || rc == -EINVAL)
- rc = SMBQueryInformation(xid, tcon, full_path, pfile_info,
- cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
- CIFS_MOUNT_MAP_SPECIAL_CHR);
kfree(pfile_info);
return rc;
}
remote_path_check:
/* check if a whole path (including prepath) is not remote */
- if (!rc && tcon) {
+ if (!rc && cifs_sb->prepathlen && tcon) {
/* build_path_to_root works only when we have a valid tcon */
full_path = cifs_build_path_to_root(cifs_sb);
if (full_path == NULL) {
/* BB we could chain these into one lock request BB */
rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
- 0, 1, lockType, 0 /* wait flag */, 0);
+ 0, 1, lockType, 0 /* wait flag */ );
if (rc == 0) {
rc = CIFSSMBLock(xid, tcon, netfid, length,
pfLock->fl_start, 1 /* numUnlock */ ,
0 /* numLock */ , lockType,
- 0 /* wait flag */, 0);
+ 0 /* wait flag */ );
pfLock->fl_type = F_UNLCK;
if (rc != 0)
cERROR(1, ("Error unlocking previously locked "
if (numLock) {
rc = CIFSSMBLock(xid, tcon, netfid, length,
- pfLock->fl_start, 0, numLock, lockType,
- wait_flag, 0);
+ pfLock->fl_start,
+ 0, numLock, lockType, wait_flag);
if (rc == 0) {
/* For Windows locks we must store them. */
(pfLock->fl_start + length) >=
(li->offset + li->length)) {
stored_rc = CIFSSMBLock(xid, tcon,
- netfid, li->length,
- li->offset, 1, 0,
- li->type, false, 0);
+ netfid,
+ li->length, li->offset,
+ 1, 0, li->type, false);
if (stored_rc)
rc = stored_rc;
*/
if (!cfile->closePend && !cfile->oplock_break_cancelled) {
rc = CIFSSMBLock(0, cifs_sb->tcon, cfile->netfid, 0, 0, 0, 0,
- LOCKING_ANDX_OPLOCK_RELEASE, false,
- cinode->clientCanCacheRead ? 1 : 0);
+ LOCKING_ANDX_OPLOCK_RELEASE, false);
cFYI(1, ("Oplock release rc = %d", rc));
}
}
rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
xid, NULL);
- if (!inode) {
- inode = ERR_PTR(rc);
- goto out;
- }
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
if (rc && cifs_sb->tcon->ipc) {
cFYI(1, ("ipc connection - fake read inode"));
inode->i_uid = cifs_sb->mnt_uid;
inode->i_gid = cifs_sb->mnt_gid;
} else if (rc) {
+ kfree(full_path);
+ _FreeXid(xid);
iget_failed(inode);
- inode = ERR_PTR(rc);
+ return ERR_PTR(rc);
}
-out:
+
kfree(full_path);
/* can not call macro FreeXid here since in a void func
* TODO: This is no longer true
}
/* BB check if Unicode and decode strings */
- if (bytes_remaining == 0) {
- /* no string area to decode, do nothing */
- } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
+ if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
/* unicode string area must be word-aligned */
if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) {
++bcc_ptr;
argv++;
if (i++ >= max)
return -E2BIG;
-
- if (fatal_signal_pending(current))
- return -ERESTARTNOHAND;
- cond_resched();
}
}
return i;
while (len > 0) {
int offset, bytes_to_copy;
- if (fatal_signal_pending(current)) {
- ret = -ERESTARTNOHAND;
- goto out;
- }
- cond_resched();
-
offset = pos % PAGE_SIZE;
if (offset == 0)
offset = PAGE_SIZE;
if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
struct page *page;
- page = get_arg_page(bprm, pos, 1);
- if (!page) {
+#ifdef CONFIG_STACK_GROWSUP
+ ret = expand_stack_downwards(bprm->vma, pos);
+ if (ret < 0) {
+ /* We've exceeded the stack rlimit. */
+ ret = -E2BIG;
+ goto out;
+ }
+#endif
+ ret = get_user_pages(current, bprm->mm, pos,
+ 1, 1, 1, &page, NULL);
+ if (ret <= 0) {
+ /* We've exceeded the stack rlimit. */
ret = -E2BIG;
goto out;
}
return retval;
out:
- if (bprm->mm) {
- acct_arg_size(bprm, 0);
+ if (bprm->mm)
mmput(bprm->mm);
- }
out_file:
if (bprm->file) {
spin_unlock(&tmp->d_lock);
spin_unlock(&dcache_lock);
- security_d_instantiate(tmp, inode);
return tmp;
out_iput:
- if (res && !IS_ERR(res))
- security_d_instantiate(res, inode);
iput(inode);
return res;
}
rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry),
ecryptfs_dentry_to_lower(dentry), &lower_stat);
if (!rc) {
- fsstack_copy_attr_all(dentry->d_inode,
- ecryptfs_inode_to_lower(dentry->d_inode), NULL);
generic_fillattr(dentry->d_inode, stat);
stat->blocks = lower_stat.blocks;
}
printk(KERN_ERR "Could not find key with description: [%s]\n",
sig);
rc = process_request_key_err(PTR_ERR(*auth_tok_key));
- (*auth_tok_key) = NULL;
goto out;
}
(*auth_tok) = ecryptfs_get_key_payload_data(*auth_tok_key);
&& (pos != 0))
zero_user(page, 0, PAGE_CACHE_SIZE);
out:
- if (unlikely(rc)) {
- unlock_page(page);
- page_cache_release(page);
- *pagep = NULL;
- }
return rc;
}
* cleanup path and it is also acquired by eventpoll_release_file()
* if a file has been pushed inside an epoll set and it is then
* close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
- * It is also acquired when inserting an epoll fd onto another epoll
- * fd. We do this so that we walk the epoll tree and ensure that this
- * insertion does not create a cycle of epoll file descriptors, which
- * could lead to deadlock. We need a global mutex to prevent two
- * simultaneous inserts (A into B and B into A) from racing and
- * constructing a cycle without either insert observing that it is
- * going to.
* It is possible to drop the "ep->mtx" and to use the global
* mutex "epmutex" (together with "ep->lock") to have it working,
* but having "ep->mtx" will make the interface more scalable.
*/
static DEFINE_MUTEX(epmutex);
-/* Used to check for epoll file descriptor inclusion loops */
-static struct nested_calls poll_loop_ncalls;
-
/* Used for safe wake up implementation */
static struct nested_calls poll_safewake_ncalls;
return res;
}
-/**
- * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested()
- * API, to verify that adding an epoll file inside another
- * epoll structure, does not violate the constraints, in
- * terms of closed loops, or too deep chains (which can
- * result in excessive stack usage).
- *
- * @priv: Pointer to the epoll file to be currently checked.
- * @cookie: Original cookie for this call. This is the top-of-the-chain epoll
- * data structure pointer.
- * @call_nests: Current dept of the @ep_call_nested() call stack.
- *
- * Returns: Returns zero if adding the epoll @file inside current epoll
- * structure @ep does not violate the constraints, or -1 otherwise.
- */
-static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
-{
- int error = 0;
- struct file *file = priv;
- struct eventpoll *ep = file->private_data;
- struct rb_node *rbp;
- struct epitem *epi;
-
- mutex_lock(&ep->mtx);
- for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
- epi = rb_entry(rbp, struct epitem, rbn);
- if (unlikely(is_file_epoll(epi->ffd.file))) {
- error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
- ep_loop_check_proc, epi->ffd.file,
- epi->ffd.file->private_data, current);
- if (error != 0)
- break;
- }
- }
- mutex_unlock(&ep->mtx);
-
- return error;
-}
-
-/**
- * ep_loop_check - Performs a check to verify that adding an epoll file (@file)
- * another epoll file (represented by @ep) does not create
- * closed loops or too deep chains.
- *
- * @ep: Pointer to the epoll private data structure.
- * @file: Pointer to the epoll file to be checked.
- *
- * Returns: Returns zero if adding the epoll @file inside current epoll
- * structure @ep does not violate the constraints, or -1 otherwise.
- */
-static int ep_loop_check(struct eventpoll *ep, struct file *file)
-{
- return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
- ep_loop_check_proc, file, ep, current);
-}
-
/*
* Open an eventpoll file descriptor.
*/
struct epoll_event __user *, event)
{
int error;
- int did_lock_epmutex = 0;
struct file *file, *tfile;
struct eventpoll *ep;
struct epitem *epi;
*/
ep = file->private_data;
- /*
- * When we insert an epoll file descriptor, inside another epoll file
- * descriptor, there is the change of creating closed loops, which are
- * better be handled here, than in more critical paths.
- *
- * We hold epmutex across the loop check and the insert in this case, in
- * order to prevent two separate inserts from racing and each doing the
- * insert "at the same time" such that ep_loop_check passes on both
- * before either one does the insert, thereby creating a cycle.
- */
- if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) {
- mutex_lock(&epmutex);
- did_lock_epmutex = 1;
- error = -ELOOP;
- if (ep_loop_check(ep, tfile) != 0)
- goto error_tgt_fput;
- }
-
-
mutex_lock(&ep->mtx);
/*
mutex_unlock(&ep->mtx);
error_tgt_fput:
- if (unlikely(did_lock_epmutex))
- mutex_unlock(&epmutex);
-
fput(tfile);
error_fput:
fput(file);
max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
EP_ITEM_COST;
- /*
- * Initialize the structure used to perform epoll file descriptor
- * inclusion loops checks.
- */
- ep_nested_calls_init(&poll_loop_ncalls);
-
/* Initialize the structure used to perform safe poll wait head wake ups */
ep_nested_calls_init(&poll_safewake_ncalls);
#ifdef CONFIG_MMU
-void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
-{
- struct mm_struct *mm = current->mm;
- long diff = (long)(pages - bprm->vma_pages);
-
- if (!mm || !diff)
- return;
-
- bprm->vma_pages = pages;
-
- down_write(&mm->mmap_sem);
- mm->total_vm += diff;
- up_write(&mm->mmap_sem);
-}
-
-struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
int write)
{
struct page *page;
unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
struct rlimit *rlim;
- acct_arg_size(bprm, size / PAGE_SIZE);
-
/*
* We've historically supported up to 32 pages (ARG_MAX)
* of argument strings even with small stacks
#else
-void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
-{
-}
-
-struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
int write)
{
struct page *page;
/*
* Release all of the old mmap stuff
*/
- acct_arg_size(bprm, 0);
retval = exec_mmap(bprm->mm);
if (retval)
goto out;
return retval;
out:
- if (bprm->mm) {
- acct_arg_size(bprm, 0);
- mmput(bprm->mm);
- }
+ if (bprm->mm)
+ mmput (bprm->mm);
out_file:
if (bprm->file) {
new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page);
if (!new_de)
goto out_dir;
+ inode_inc_link_count(old_inode);
ext2_set_link(new_dir, new_de, new_page, old_inode, 1);
new_inode->i_ctime = CURRENT_TIME_SEC;
if (dir_de)
if (new_dir->i_nlink >= EXT2_LINK_MAX)
goto out_dir;
}
+ inode_inc_link_count(old_inode);
err = ext2_add_link(new_dentry, old_inode);
- if (err)
+ if (err) {
+ inode_dec_link_count(old_inode);
goto out_dir;
+ }
if (dir_de)
inode_inc_link_count(new_dir);
}
/*
* Like most other Unix systems, set the ctime for inodes on a
* rename.
+ * inode_dec_link_count() will mark the inode dirty.
*/
old_inode->i_ctime = CURRENT_TIME_SEC;
- mark_inode_dirty(old_inode);
ext2_delete_entry (old_de, old_page);
+ inode_dec_link_count(old_inode);
if (dir_de) {
if (old_dir != new_dir)
goto cleanup;
node2 = (struct dx_node *)(bh2->b_data);
entries2 = node2->entries;
- memset(&node2->fake, 0, sizeof(struct fake_dirent));
node2->fake.rec_len = ext3_rec_len_to_disk(sb->s_blocksize);
+ node2->fake.inode = 0;
BUFFER_TRACE(frame->bh, "get_write_access");
err = ext3_journal_get_write_access(handle, frame->bh);
if (err)
return;
}
- /* Check if feature set allows readwrite operations */
- if (EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP)) {
- printk(KERN_INFO "EXT3-fs: %s: Skipping orphan cleanup due to "
- "unknown ROCOMPAT features\n", sb->s_id);
- return;
- }
-
if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) {
if (es->s_last_orphan)
jbd_debug(1, "Errors on filesystem, "
/* if nrblocks are contiguous */
if (chunk) {
/*
- * With N contiguous data blocks, we need at most
- * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
- * 2 dindirect blocks, and 1 tindirect block
+ * With N contiguous data blocks, it needs at most
+ * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks
+ * 2 dindirect blocks
+ * 1 tindirect block
*/
- return DIV_ROUND_UP(nrblocks,
- EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
+ indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb);
+ return indirects + 3;
}
/*
* if nrblocks are not contiguous, worst case, each block touch
goto fail;
percpu_counter_inc(&nr_files);
- f->f_cred = get_cred(cred);
if (security_file_alloc(f))
goto fail_sec;
INIT_LIST_HEAD(&f->f_u.fu_list);
atomic_long_set(&f->f_count, 1);
rwlock_init(&f->f_owner.lock);
+ f->f_cred = get_cred(cred);
spin_lock_init(&f->f_lock);
eventpoll_init_file(f);
/* f->f_version: 0 */
return ff;
}
-static void fuse_release_async(struct work_struct *work)
-{
- struct fuse_req *req;
- struct fuse_conn *fc;
- struct path path;
-
- req = container_of(work, struct fuse_req, misc.release.work);
- path = req->misc.release.path;
- fc = get_fuse_conn(path.dentry->d_inode);
-
- fuse_put_request(fc, req);
- path_put(&path);
-}
-
static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
- if (fc->destroy_req) {
- /*
- * If this is a fuseblk mount, then it's possible that
- * releasing the path will result in releasing the
- * super block and sending the DESTROY request. If
- * the server is single threaded, this would hang.
- * For this reason do the path_put() in a separate
- * thread.
- */
- atomic_inc(&req->count);
- INIT_WORK(&req->misc.release.work, fuse_release_async);
- schedule_work(&req->misc.release.work);
- } else {
- path_put(&req->misc.release.path);
- }
+ path_put(&req->misc.release.path);
}
-static void fuse_file_put(struct fuse_file *ff, bool sync)
+static void fuse_file_put(struct fuse_file *ff)
{
if (atomic_dec_and_test(&ff->count)) {
struct fuse_req *req = ff->reserved_req;
- if (sync) {
- fuse_request_send(ff->fc, req);
- path_put(&req->misc.release.path);
- fuse_put_request(ff->fc, req);
- } else {
- req->end = fuse_release_end;
- fuse_request_send_background(ff->fc, req);
- }
+ req->end = fuse_release_end;
+ fuse_request_send_background(ff->fc, req);
kfree(ff);
}
}
* Normally this will send the RELEASE request, however if
* some asynchronous READ or WRITE requests are outstanding,
* the sending will be delayed.
- *
- * Make the release synchronous if this is a fuseblk mount,
- * synchronous RELEASE is allowed (and desirable) in this case
- * because the server can be trusted not to screw up.
*/
- fuse_file_put(ff, ff->fc->destroy_req != NULL);
+ fuse_file_put(ff);
}
static int fuse_open(struct inode *inode, struct file *file)
unlock_page(page);
}
if (req->ff)
- fuse_file_put(req->ff, false);
+ fuse_file_put(req->ff);
}
static void fuse_send_readpages(struct fuse_req *req, struct file *file)
static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
__free_page(req->pages[0]);
- fuse_file_put(req->ff, false);
+ fuse_file_put(req->ff);
}
static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
#include <linux/rwsem.h>
#include <linux/rbtree.h>
#include <linux/poll.h>
-#include <linux/workqueue.h>
/** Max number of pages that can be used in a single read request */
#define FUSE_MAX_PAGES_PER_REQ 32
union {
struct fuse_forget_in forget_in;
struct {
- union {
- struct fuse_release_in in;
- struct work_struct work;
- };
+ struct fuse_release_in in;
struct path path;
} release;
struct fuse_init_in init_in;
*ptr++ = cpu_to_be64(bn++);
break;
}
- } while ((state != ALLOC_DATA) || !dblock);
+ } while (state != ALLOC_DATA);
ip->i_height = height;
gfs2_add_inode_blocks(&ip->i_inode, alloced);
* fuzziness in the current usage value of IDs that are being used on different
* nodes in the cluster simultaneously. So, it is possible for a user on
* multiple nodes to overrun their quota, but that overrun is controllable.
- * Since quota tags are part of transactions, there is no need for a quota check
+ * Since quota tags are part of transactions, there is no need to a quota check
* program to be run on node crashes or anything like that.
*
* There are couple of knobs that let the administrator manage the quota
#define QUOTA_USER 1
#define QUOTA_GROUP 0
+struct gfs2_quota_host {
+ u64 qu_limit;
+ u64 qu_warn;
+ s64 qu_value;
+ u32 qu_ll_next;
+};
+
struct gfs2_quota_change_host {
u64 qc_change;
u32 qc_flags; /* GFS2_QCF_... */
mutex_unlock(&sdp->sd_quota_mutex);
}
+static void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
+{
+ const struct gfs2_quota *str = buf;
+
+ qu->qu_limit = be64_to_cpu(str->qu_limit);
+ qu->qu_warn = be64_to_cpu(str->qu_warn);
+ qu->qu_value = be64_to_cpu(str->qu_value);
+ qu->qu_ll_next = be32_to_cpu(str->qu_ll_next);
+}
+
+static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
+{
+ struct gfs2_quota *str = buf;
+
+ str->qu_limit = cpu_to_be64(qu->qu_limit);
+ str->qu_warn = cpu_to_be64(qu->qu_warn);
+ str->qu_value = cpu_to_be64(qu->qu_value);
+ str->qu_ll_next = cpu_to_be32(qu->qu_ll_next);
+ memset(&str->qu_reserved, 0, sizeof(str->qu_reserved));
+}
+
/**
- * gfs2_adjust_quota - adjust record of current block usage
- * @ip: The quota inode
- * @loc: Offset of the entry in the quota file
- * @change: The amount of change to record
- * @qd: The quota data
+ * gfs2_adjust_quota
*
* This function was mostly borrowed from gfs2_block_truncate_page which was
* in turn mostly borrowed from ext3
- *
- * Returns: 0 or -ve on error
*/
-
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
s64 change, struct gfs2_quota_data *qd)
{
unsigned blocksize, iblock, pos;
struct buffer_head *bh;
struct page *page;
- void *kaddr, *ptr;
- struct gfs2_quota q, *qp;
- int err, nbytes;
+ void *kaddr;
+ char *ptr;
+ struct gfs2_quota_host qp;
+ s64 value;
+ int err = -EIO;
if (gfs2_is_stuffed(ip))
gfs2_unstuff_dinode(ip, NULL);
-
- memset(&q, 0, sizeof(struct gfs2_quota));
- err = gfs2_internal_read(ip, NULL, (char *)&q, &loc, sizeof(q));
- if (err < 0)
- return err;
-
- err = -EIO;
- qp = &q;
- qp->qu_value = be64_to_cpu(qp->qu_value);
- qp->qu_value += change;
- qp->qu_value = cpu_to_be64(qp->qu_value);
- qd->qd_qb.qb_value = qp->qu_value;
-
- /* Write the quota into the quota file on disk */
- ptr = qp;
- nbytes = sizeof(struct gfs2_quota);
-get_a_page:
+
page = grab_cache_page(mapping, index);
if (!page)
return -ENOMEM;
if (!buffer_mapped(bh)) {
gfs2_block_map(inode, iblock, bh, 1);
if (!buffer_mapped(bh))
- goto unlock_out;
- /* If it's a newly allocated disk block for quota, zero it */
- if (buffer_new(bh))
- zero_user(page, pos - blocksize, bh->b_size);
+ goto unlock;
}
if (PageUptodate(page))
ll_rw_block(READ_META, 1, &bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh))
- goto unlock_out;
+ goto unlock;
}
gfs2_trans_add_bh(ip->i_gl, bh, 0);
kaddr = kmap_atomic(page, KM_USER0);
- if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
- nbytes = PAGE_CACHE_SIZE - offset;
- memcpy(kaddr + offset, ptr, nbytes);
+ ptr = kaddr + offset;
+ gfs2_quota_in(&qp, ptr);
+ qp.qu_value += change;
+ value = qp.qu_value;
+ gfs2_quota_out(&qp, ptr);
flush_dcache_page(page);
kunmap_atomic(kaddr, KM_USER0);
- unlock_page(page);
- page_cache_release(page);
-
- /* If quota straddles page boundary, we need to update the rest of the
- * quota at the beginning of the next page */
- if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
- ptr = ptr + nbytes;
- nbytes = sizeof(struct gfs2_quota) - nbytes;
- offset = 0;
- index++;
- goto get_a_page;
- }
err = 0;
- return err;
-unlock_out:
+ qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);
+ qd->qd_qb.qb_value = cpu_to_be64(value);
+ ((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_magic = cpu_to_be32(GFS2_MAGIC);
+ ((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_value = cpu_to_be64(value);
+unlock:
unlock_page(page);
page_cache_release(page);
return err;
sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
for (qx = 0; qx < num_qd; qx++) {
- error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
+ error = gfs2_glock_nq_init(qda[qx]->qd_gl,
+ LM_ST_EXCLUSIVE,
GL_NOCACHE, &ghs[qx]);
if (error)
goto out;
* rgrp since it won't be allocated during the transaction
*/
al->al_requested = 1;
- /* +3 in the end for unstuffing block, inode size update block
- * and another block in case quota straddles page boundary and
- * two blocks need to be updated instead of 1 */
- blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
+ /* +1 in the end for block requested above for unstuffing */
+ blocks = num_qd * data_blocks + RES_DINODE + num_qd + 1;
if (nalloc)
al->al_requested += nalloc * (data_blocks + ind_blocks);
qd = qda[x];
offset = qd2offset(qd);
error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync,
- (struct gfs2_quota_data *)qd);
+ (struct gfs2_quota_data *)
+ qd);
if (error)
goto out_end_trans;
struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
struct gfs2_holder i_gh;
- struct gfs2_quota q;
+ struct gfs2_quota_host q;
+ char buf[sizeof(struct gfs2_quota)];
int error;
struct gfs2_quota_lvb *qlvb;
if (error)
goto fail;
- memset(&q, 0, sizeof(struct gfs2_quota));
+ memset(buf, 0, sizeof(struct gfs2_quota));
pos = qd2offset(qd);
- error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q));
+ error = gfs2_internal_read(ip, NULL, buf, &pos,
+ sizeof(struct gfs2_quota));
if (error < 0)
goto fail_gunlock;
- if ((error < sizeof(q)) && force_refresh) {
- error = -ENOENT;
- goto fail_gunlock;
- }
+
gfs2_glock_dq_uninit(&i_gh);
+ gfs2_quota_in(&q, buf);
qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
qlvb->__pad = 0;
- qlvb->qb_limit = q.qu_limit;
- qlvb->qb_warn = q.qu_warn;
- qlvb->qb_value = q.qu_value;
+ qlvb->qb_limit = cpu_to_be64(q.qu_limit);
+ qlvb->qb_warn = cpu_to_be64(q.qu_warn);
+ qlvb->qb_value = cpu_to_be64(q.qu_value);
qd->qd_qb = *qlvb;
if (gfs2_glock_is_blocking(qd->qd_gl)) {
gfs2_glock_dq_uninit(&q_gh);
qd_put(qd);
+
return error;
}
pos += vec->iov_len;
}
- /*
- * If no bytes were started, return the error, and let the
- * generic layer handle the completion.
- */
- if (requested_bytes == 0) {
- nfs_direct_req_release(dreq);
- return result < 0 ? result : -EIO;
- }
-
if (put_dreq(dreq))
nfs_direct_complete(dreq);
- return 0;
+
+ if (requested_bytes != 0)
+ return 0;
+
+ if (result < 0)
+ return result;
+ return -EIO;
}
static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
pos += vec->iov_len;
}
- /*
- * If no bytes were started, return the error, and let the
- * generic layer handle the completion.
- */
- if (requested_bytes == 0) {
- nfs_direct_req_release(dreq);
- return result < 0 ? result : -EIO;
- }
-
if (put_dreq(dreq))
nfs_direct_write_complete(dreq, dreq->inode);
- return 0;
+
+ if (requested_bytes != 0)
+ return 0;
+
+ if (result < 0)
+ return result;
+ return -EIO;
}
static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
if (have_error)
ret = xchg(&ctx->error, 0);
- if (!ret && status < 0)
+ if (!ret)
ret = status;
return ret;
}
return ret;
}
-static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
{
struct nfs_inode *nfsi = NFS_I(inode);
- unsigned long ret = 0;
if ((fattr->valid & NFS_ATTR_FATTR_PRECHANGE)
&& (fattr->valid & NFS_ATTR_FATTR_CHANGE)
nfsi->change_attr = fattr->change_attr;
if (S_ISDIR(inode->i_mode))
nfsi->cache_validity |= NFS_INO_INVALID_DATA;
- ret |= NFS_INO_INVALID_ATTR;
}
/* If we have atomic WCC data, we may update some attributes */
if ((fattr->valid & NFS_ATTR_FATTR_PRECTIME)
&& (fattr->valid & NFS_ATTR_FATTR_CTIME)
- && timespec_equal(&inode->i_ctime, &fattr->pre_ctime)) {
- memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
- ret |= NFS_INO_INVALID_ATTR;
- }
+ && timespec_equal(&inode->i_ctime, &fattr->pre_ctime))
+ memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
if ((fattr->valid & NFS_ATTR_FATTR_PREMTIME)
&& (fattr->valid & NFS_ATTR_FATTR_MTIME)
&& timespec_equal(&inode->i_mtime, &fattr->pre_mtime)) {
- memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
- if (S_ISDIR(inode->i_mode))
- nfsi->cache_validity |= NFS_INO_INVALID_DATA;
- ret |= NFS_INO_INVALID_ATTR;
+ memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
+ if (S_ISDIR(inode->i_mode))
+ nfsi->cache_validity |= NFS_INO_INVALID_DATA;
}
if ((fattr->valid & NFS_ATTR_FATTR_PRESIZE)
&& (fattr->valid & NFS_ATTR_FATTR_SIZE)
&& i_size_read(inode) == nfs_size_to_loff_t(fattr->pre_size)
- && nfsi->npages == 0) {
- i_size_write(inode, nfs_size_to_loff_t(fattr->size));
- ret |= NFS_INO_INVALID_ATTR;
- }
- return ret;
+ && nfsi->npages == 0)
+ i_size_write(inode, nfs_size_to_loff_t(fattr->size));
}
/**
| NFS_INO_REVAL_PAGECACHE);
/* Do atomic weak cache consistency updates */
- invalid |= nfs_wcc_update_inode(inode, fattr);
+ nfs_wcc_update_inode(inode, fattr);
/* More cache consistency checks */
if (fattr->valid & NFS_ATTR_FATTR_CHANGE) {
}
}
-static int buf_to_pages_noslab(const void *buf, size_t buflen,
- struct page **pages, unsigned int *pgbase)
-{
- struct page *newpage, **spages;
- int rc = 0;
- size_t len;
- spages = pages;
-
- do {
- len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
- newpage = alloc_page(GFP_KERNEL);
-
- if (newpage == NULL)
- goto unwind;
- memcpy(page_address(newpage), buf, len);
- buf += len;
- buflen -= len;
- *pages++ = newpage;
- rc++;
- } while (buflen != 0);
-
- return rc;
-
-unwind:
- for(; rc > 0; rc--)
- __free_page(spages[rc-1]);
- return -ENOMEM;
-}
-
struct nfs4_cached_acl {
int cached;
size_t len;
.rpc_argp = &arg,
.rpc_resp = &res,
};
- int ret, i;
+ int ret;
if (!nfs4_server_supports_acls(server))
return -EOPNOTSUPP;
- i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
- if (i < 0)
- return i;
nfs_inode_return_delegation(inode);
+ buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
ret = nfs4_call_sync(server, &msg, &arg, &res, 1);
-
- /*
- * Free each page after tx, so the only ref left is
- * held by the network stack
- */
- for (; i > 0; i--)
- put_page(pages[i-1]);
-
nfs_access_zap_cache(inode);
nfs_zap_acl_cache(inode);
return ret;
int status = 0;
/* Ensure exclusive access to NFSv4 state */
- do {
+ for(;;) {
if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
/* We're going to have to re-establish a clientid */
status = nfs4_reclaim_lease(clp);
break;
if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
break;
- } while (atomic_read(&clp->cl_count) > 1);
+ }
return;
out_error:
printk(KERN_WARNING "Error: state manager failed on NFSv4 server %s"
if (error < 0)
goto out;
- /*
- * noac is a special case. It implies -o sync, but that's not
- * necessarily reflected in the mtab options. do_remount_sb
- * will clear MS_SYNCHRONOUS if -o sync wasn't specified in the
- * remount options, so we have to explicitly reset it.
- */
- if (data->flags & NFS_MOUNT_NOAC)
- *flags |= MS_SYNCHRONOUS;
-
/* compare new mount options with old ones */
error = nfs_compare_remount_data(nfss, data);
out:
exp_readlock();
nfserr = nfsd_open(rqstp, &fh, S_IFREG, NFSD_MAY_LOCK, filp);
fh_put(&fh);
+ rqstp->rq_client = NULL;
exp_readunlock();
/* We return nlm error codes as nlm doesn't know
* about nfsd, but nfsd does know about nlm..
void *);
enum nfsd4_op_flags {
ALLOWED_WITHOUT_FH = 1 << 0, /* No current filehandle required */
- ALLOWED_ON_ABSENT_FS = 1 << 1, /* ops processed on absent fs */
- ALLOWED_AS_FIRST_OP = 1 << 2, /* ops required first in compound */
+ ALLOWED_ON_ABSENT_FS = 2 << 0, /* ops processed on absent fs */
+ ALLOWED_AS_FIRST_OP = 3 << 0, /* ops required first in compound */
};
struct nfsd4_operation {
READ_BUF(dummy32);
len += (XDR_QUADLEN(dummy32) << 2);
READMEM(buf, dummy32);
- if ((status = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid)))
- return status;
+ if ((host_err = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid)))
+ goto out_nfserr;
iattr->ia_valid |= ATTR_UID;
}
if (bmval[1] & FATTR4_WORD1_OWNER_GROUP) {
READ_BUF(dummy32);
len += (XDR_QUADLEN(dummy32) << 2);
READMEM(buf, dummy32);
- if ((status = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid)))
- return status;
+ if ((host_err = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid)))
+ goto out_nfserr;
iattr->ia_valid |= ATTR_GID;
}
if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) {
READ_BUF(4);
READ32(dummy);
READ_BUF(dummy * 4);
+ for (i = 0; i < dummy; ++i)
+ READ32(dummy);
break;
case RPC_AUTH_GSS:
dprintk("RPC_AUTH_GSS callback secflavor "
READ_BUF(4);
READ32(dummy);
READ_BUF(dummy);
+ p += XDR_QUADLEN(dummy);
break;
default:
dprintk("Illegal callback secflavor\n");
if (ra->p_count == 0)
frap = rap;
}
- depth = nfsdstats.ra_size;
+ depth = nfsdstats.ra_size*11/10;
if (!frap) {
spin_unlock(&rab->pb_lock);
return NULL;
goto out;
if (!(iap->ia_valid & ATTR_MODE))
iap->ia_mode = 0;
- err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC);
+ err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
if (err)
goto out;
if (IS_ERR(dchild))
goto out_nfserr;
- /* If file doesn't exist, check for permissions to create one */
- if (!dchild->d_inode) {
- err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
- if (err)
- goto out;
- }
-
err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
if (err)
goto out;
/*
* check to see if the page is mapped already (no holes)
*/
- if (PageMappedToDisk(page))
+ if (PageMappedToDisk(page)) {
+ unlock_page(page);
goto mapped;
-
+ }
if (page_has_buffers(page)) {
struct buffer_head *bh, *head;
int fully_mapped = 1;
if (fully_mapped) {
SetPageMappedToDisk(page);
+ unlock_page(page);
goto mapped;
}
}
return VM_FAULT_SIGBUS;
ret = block_page_mkwrite(vma, vmf, nilfs_get_block);
- if (ret != VM_FAULT_LOCKED) {
+ if (unlikely(ret)) {
nilfs_transaction_abort(inode->i_sb);
return ret;
}
- nilfs_set_file_dirty(NILFS_SB(inode->i_sb), inode,
- 1 << (PAGE_SHIFT - inode->i_blkbits));
nilfs_transaction_commit(inode->i_sb);
mapped:
SetPageChecked(page);
wait_on_page_writeback(page);
- return VM_FAULT_LOCKED;
+ return 0;
}
static const struct vm_operations_struct nilfs_file_vm_ops = {
ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos,
&cluster_start, &cluster_end);
- /* treat the write as new if the a hole/lseek spanned across
- * the page boundary.
- */
- new = new | ((i_size_read(inode) <= page_offset(page)) &&
- (page_offset(page) <= user_pos));
-
if (page == wc->w_target_page) {
map_from = user_pos & (PAGE_CACHE_SIZE - 1);
map_to = map_from + user_len;
u32 num_clusters, unsigned int e_flags)
{
int ret, delete, index, credits = 0;
- u32 new_bit, new_len, orig_num_clusters;
+ u32 new_bit, new_len;
unsigned int set_len;
struct ocfs2_super *osb = OCFS2_SB(sb);
handle_t *handle;
goto out;
}
- orig_num_clusters = num_clusters;
-
while (num_clusters) {
ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
p_cluster, num_clusters,
* in write-back mode.
*/
if (context->get_clusters == ocfs2_di_get_clusters) {
- ret = ocfs2_cow_sync_writeback(sb, context, cpos,
- orig_num_clusters);
+ ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters);
if (ret)
mlog_errno(ret);
}
return c;
}
- return NULL;
+ return c;
}
/*
goto fail;
}
- /* Check that sizeof_partition_entry has the correct value */
- if (le32_to_cpu((*gpt)->sizeof_partition_entry) != sizeof(gpt_entry)) {
- pr_debug("GUID Partitition Entry Size check failed.\n");
- goto fail;
- }
-
if (!(*ptes = alloc_read_gpt_entries(bdev, *gpt)))
goto fail;
}
vm->vblk_size = get_unaligned_be32(data + 0x08);
- if (vm->vblk_size == 0) {
- ldm_error ("Illegal VBLK size");
- return false;
- }
-
vm->vblk_offset = get_unaligned_be32(data + 0x0C);
vm->last_vblk_seq = get_unaligned_be32(data + 0x04);
BUG_ON (!data || !frags);
- if (size < 2 * VBLK_SIZE_HEAD) {
- ldm_error("Value of size is to small.");
- return false;
- }
-
group = get_unaligned_be32(data + 0x08);
rec = get_unaligned_be16(data + 0x0C);
num = get_unaligned_be16(data + 0x0E);
ldm_error ("A VBLK claims to have %d parts.", num);
return false;
}
- if (rec >= num) {
- ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
- return false;
- }
list_for_each (item, frags) {
f = list_entry (item, struct frag, list);
f->map |= (1 << rec);
- data += VBLK_SIZE_HEAD;
- size -= VBLK_SIZE_HEAD;
-
+ if (num > 0) {
+ data += VBLK_SIZE_HEAD;
+ size -= VBLK_SIZE_HEAD;
+ }
memcpy (f->data+rec*(size-VBLK_SIZE_HEAD)+VBLK_SIZE_HEAD, data, size);
return true;
int mac_partition(struct parsed_partitions *state, struct block_device *bdev)
{
+ int slot = 1;
Sector sect;
unsigned char *data;
- int slot, blocks_in_map;
+ int blk, blocks_in_map;
unsigned secsize;
#ifdef CONFIG_PPC_PMAC
int found_root = 0;
put_dev_sector(sect);
return 0; /* not a MacOS disk */
}
- blocks_in_map = be32_to_cpu(part->map_count);
- if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
- put_dev_sector(sect);
- return 0;
- }
printk(" [mac]");
- for (slot = 1; slot <= blocks_in_map; ++slot) {
- int pos = slot * secsize;
+ blocks_in_map = be32_to_cpu(part->map_count);
+ for (blk = 1; blk <= blocks_in_map; ++blk) {
+ int pos = blk * secsize;
put_dev_sector(sect);
data = read_dev_sector(bdev, pos/512, &sect);
if (!data)
}
if (goodness > found_root_goodness) {
- found_root = slot;
+ found_root = blk;
found_root_goodness = goodness;
}
}
#endif /* CONFIG_PPC_PMAC */
+
+ ++slot;
}
#ifdef CONFIG_PPC_PMAC
if (found_root_goodness)
#include "check.h"
#include "osf.h"
-#define MAX_OSF_PARTITIONS 18
-
int osf_partition(struct parsed_partitions *state, struct block_device *bdev)
{
int i;
int slot = 1;
- unsigned int npartitions;
Sector sect;
unsigned char *data;
struct disklabel {
u8 p_fstype;
u8 p_frag;
__le16 p_cpg;
- } d_partitions[MAX_OSF_PARTITIONS];
+ } d_partitions[8];
} * label;
struct d_partition * partition;
put_dev_sector(sect);
return 0;
}
- npartitions = le16_to_cpu(label->d_npartitions);
- if (npartitions > MAX_OSF_PARTITIONS) {
- put_dev_sector(sect);
- return 0;
- }
- for (i = 0 ; i < npartitions; i++, partition++) {
+ for (i = 0 ; i < le16_to_cpu(label->d_npartitions); i++, partition++) {
if (slot == state->limit)
break;
if (le32_to_cpu(partition->p_size))
if (tracer)
tpid = task_pid_nr_ns(tracer, ns);
}
- cred = get_task_cred(p);
+ cred = get_cred((struct cred *) __task_cred(p));
seq_printf(m,
"State:\t%s\n"
"Tgid:\t%d\n"
task_sig(m, task);
task_cap(m, task);
cpuset_task_status_allowed(m, task);
+#if defined(CONFIG_S390)
+ task_show_regs(m, task);
+#endif
task_context_switch_counts(m, task);
return 0;
}
vsize,
mm ? get_mm_rss(mm) : 0,
rsslim,
- mm ? (permitted ? mm->start_code : 1) : 0,
- mm ? (permitted ? mm->end_code : 1) : 0,
+ mm ? mm->start_code : 0,
+ mm ? mm->end_code : 0,
(permitted && mm) ? mm->start_stack : 0,
esp,
eip,
/* for the /proc/ directory itself, after non-process stuff has been done */
int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
- unsigned int nr;
- struct task_struct *reaper;
+ unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY;
+ struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode);
struct tgid_iter iter;
struct pid_namespace *ns;
- if (filp->f_pos >= PID_MAX_LIMIT + TGID_OFFSET)
- goto out_no_task;
- nr = filp->f_pos - FIRST_PROCESS_ENTRY;
-
- reaper = get_proc_task(filp->f_path.dentry->d_inode);
if (!reaper)
goto out_no_task;
const char *name = arch_vma_name(vma);
if (!name) {
if (mm) {
- if (vma->vm_start <= mm->brk &&
- vma->vm_end >= mm->start_brk) {
+ if (vma->vm_start <= mm->start_brk &&
+ vma->vm_end >= mm->brk) {
name = "[heap]";
} else if (vma->vm_start <= mm->start_stack &&
vma->vm_end >= mm->start_stack) {
*/
int dquot_commit(struct dquot *dquot)
{
- int ret = 0;
+ int ret = 0, ret2 = 0;
struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
mutex_lock(&dqopt->dqio_mutex);
spin_unlock(&dq_list_lock);
/* Inactive dquot can be only if there was error during read/init
* => we have better not writing it */
- if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
+ if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
- else
- ret = -EIO;
+ if (info_dirty(&dqopt->info[dquot->dq_type])) {
+ ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
+ dquot->dq_sb, dquot->dq_type);
+ }
+ if (ret >= 0)
+ ret = ret2;
+ }
out_sem:
mutex_unlock(&dqopt->dqio_mutex);
return ret;
SetPageDirty(page);
unlock_page(page);
- put_page(page);
}
return 0;
if (!(out_file->f_mode & FMODE_WRITE))
goto fput_out;
retval = -EINVAL;
+ if (!out_file->f_op || !out_file->f_op->sendpage)
+ goto fput_out;
in_inode = in_file->f_path.dentry->d_inode;
out_inode = out_file->f_path.dentry->d_inode;
retval = rw_verify_area(WRITE, out_file, &out_file->f_pos, count);
ret = buf->ops->confirm(pipe, buf);
if (!ret) {
more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
- if (file->f_op && file->f_op->sendpage)
- ret = file->f_op->sendpage(file, buf->page, buf->offset,
- sd->len, &pos, more);
- else
- ret = -EINVAL;
+
+ ret = file->f_op->sendpage(file, buf->page, buf->offset,
+ sd->len, &pos, more);
}
return ret;
if (unlikely(ret < 0))
return ret;
- if (out->f_op && out->f_op->splice_write)
- splice_write = out->f_op->splice_write;
- else
+ splice_write = out->f_op->splice_write;
+ if (!splice_write)
splice_write = default_file_splice_write;
return splice_write(pipe, out, ppos, len, flags);
if (unlikely(ret < 0))
return ret;
- if (in->f_op && in->f_op->splice_read)
- splice_read = in->f_op->splice_read;
- else
+ splice_read = in->f_op->splice_read;
+ if (!splice_read)
splice_read = default_file_splice_read;
return splice_read(in, ppos, pipe, len, flags);
if (off_in)
return -ESPIPE;
if (off_out) {
- if (!out->f_op || !out->f_op->llseek ||
- out->f_op->llseek == no_llseek)
+ if (out->f_op->llseek == no_llseek)
return -EINVAL;
if (copy_from_user(&offset, off_out, sizeof(loff_t)))
return -EFAULT;
if (off_out)
return -ESPIPE;
if (off_in) {
- if (!in->f_op || !in->f_op->llseek ||
- in->f_op->llseek == no_llseek)
+ if (in->f_op->llseek == no_llseek)
return -EINVAL;
if (copy_from_user(&offset, off_in, sizeof(loff_t)))
return -EFAULT;
length += sizeof(dirh);
dir_count = le32_to_cpu(dirh.count) + 1;
-
- /* dir_count should never be larger than 256 */
- if (dir_count > 256)
- goto failed_read;
-
while (dir_count--) {
/*
* Read directory entry.
size = le16_to_cpu(dire->size) + 1;
- /* size should never be larger than SQUASHFS_NAME_LEN */
- if (size > SQUASHFS_NAME_LEN)
- goto failed_read;
-
err = squashfs_read_metadata(inode->i_sb, dire->name,
&block, &offset, size);
if (err < 0)
length += sizeof(dirh);
dir_count = le32_to_cpu(dirh.count) + 1;
-
- /* dir_count should never be larger than 256 */
- if (dir_count > 256)
- goto data_error;
-
while (dir_count--) {
/*
* Read directory entry.
size = le16_to_cpu(dire->size) + 1;
- /* size should never be larger than SQUASHFS_NAME_LEN */
- if (size > SQUASHFS_NAME_LEN)
- goto data_error;
-
err = squashfs_read_metadata(dir->i_sb, dire->name,
&block, &offset, size);
if (err < 0)
d_add(dentry, inode);
return ERR_PTR(0);
-data_error:
- err = -EIO;
-
read_failure:
ERROR("Unable to read directory block [%llx:%x]\n",
squashfs_i(dir)->start + msblk->directory_table,
size_t sz;
if (!(ubifs_chk_flags & UBIFS_CHK_OLD_IDX))
- return 0;
+ goto out;
INIT_LIST_HEAD(&list);
void dbg_save_space_info(struct ubifs_info *c)
{
struct ubifs_debug_info *d = c->dbg;
- int freeable_cnt;
- spin_lock(&c->space_lock);
- memcpy(&d->saved_lst, &c->lst, sizeof(struct ubifs_lp_stats));
+ ubifs_get_lp_stats(c, &d->saved_lst);
- /*
- * We use a dirty hack here and zero out @c->freeable_cnt, because it
- * affects the free space calculations, and UBIFS might not know about
- * all freeable eraseblocks. Indeed, we know about freeable eraseblocks
- * only when we read their lprops, and we do this only lazily, upon the
- * need. So at any given point of time @c->freeable_cnt might be not
- * exactly accurate.
- *
- * Just one example about the issue we hit when we did not zero
- * @c->freeable_cnt.
- * 1. The file-system is mounted R/O, c->freeable_cnt is %0. We save the
- * amount of free space in @d->saved_free
- * 2. We re-mount R/W, which makes UBIFS to read the "lsave"
- * information from flash, where we cache LEBs from various
- * categories ('ubifs_remount_fs()' -> 'ubifs_lpt_init()'
- * -> 'lpt_init_wr()' -> 'read_lsave()' -> 'ubifs_lpt_lookup()'
- * -> 'ubifs_get_pnode()' -> 'update_cats()'
- * -> 'ubifs_add_to_cat()').
- * 3. Lsave contains a freeable eraseblock, and @c->freeable_cnt
- * becomes %1.
- * 4. We calculate the amount of free space when the re-mount is
- * finished in 'dbg_check_space_info()' and it does not match
- * @d->saved_free.
- */
- freeable_cnt = c->freeable_cnt;
- c->freeable_cnt = 0;
+ spin_lock(&c->space_lock);
d->saved_free = ubifs_get_free_space_nolock(c);
- c->freeable_cnt = freeable_cnt;
spin_unlock(&c->space_lock);
}
{
struct ubifs_debug_info *d = c->dbg;
struct ubifs_lp_stats lst;
- long long free;
- int freeable_cnt;
+ long long avail, free;
spin_lock(&c->space_lock);
- freeable_cnt = c->freeable_cnt;
- c->freeable_cnt = 0;
- free = ubifs_get_free_space_nolock(c);
- c->freeable_cnt = freeable_cnt;
+ avail = ubifs_calc_available(c, c->min_idx_lebs);
spin_unlock(&c->space_lock);
+ free = ubifs_get_free_space(c);
if (free != d->saved_free) {
ubifs_err("free space changed from %lld to %lld",
}
fname = "dump_lprops";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
+ dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops);
if (IS_ERR(dent))
goto out_remove;
d->dfs_dump_lprops = dent;
fname = "dump_budg";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
+ dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops);
if (IS_ERR(dent))
goto out_remove;
d->dfs_dump_budg = dent;
fname = "dump_tnc";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
+ dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops);
if (IS_ERR(dent))
goto out_remove;
d->dfs_dump_tnc = dent;
dbg_gen("syncing inode %lu", inode->i_ino);
- if (inode->i_sb->s_flags & MS_RDONLY)
- return 0;
-
/*
* VFS has already synchronized dirty pages for this inode. Synchronize
* the inode unless this is a 'datasync()' call.
lnum = branch->lnum;
offs = branch->offs;
pnode = kzalloc(sizeof(struct ubifs_pnode), GFP_NOFS);
- if (!pnode)
- return -ENOMEM;
-
+ if (!pnode) {
+ err = -ENOMEM;
+ goto out;
+ }
if (lnum == 0) {
/*
* This pnode was not written which just means that the LEB
goto out_free;
}
memcpy(c->rcvrd_mst_node, c->mst_node, UBIFS_MST_NODE_SZ);
-
- /*
- * We had to recover the master node, which means there was an
- * unclean reboot. However, it is possible that the master node
- * is clean at this point, i.e., %UBIFS_MST_DIRTY is not set.
- * E.g., consider the following chain of events:
- *
- * 1. UBIFS was cleanly unmounted, so the master node is clean
- * 2. UBIFS is being mounted R/W and starts changing the master
- * node in the first (%UBIFS_MST_LNUM). A power cut happens,
- * so this LEB ends up with some amount of garbage at the
- * end.
- * 3. UBIFS is being mounted R/O. We reach this place and
- * recover the master node from the second LEB
- * (%UBIFS_MST_LNUM + 1). But we cannot update the media
- * because we are being mounted R/O. We have to defer the
- * operation.
- * 4. However, this master node (@c->mst_node) is marked as
- * clean (since the step 1). And if we just return, the
- * mount code will be confused and won't recover the master
- * node when it is re-mounter R/W later.
- *
- * Thus, to force the recovery by marking the master node as
- * dirty.
- */
- c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
} else {
/* Write the recovered master node */
c->max_sqnum = le64_to_cpu(mst->ch.sqnum) - 1;
return ERR_PTR(-ESTALE);
/*
- * The XFS_IGET_UNTRUSTED means that an invalid inode number is just
- * fine and not an indication of a corrupted filesystem as clients can
- * send invalid file handles and we have to handle it gracefully..
+ * The XFS_IGET_BULKSTAT means that an invalid inode number is just
+ * fine and not an indication of a corrupted filesystem. Because
+ * clients can send any kind of invalid file handle, e.g. after
+ * a restore on the server, we have to deal with this case gracefully.
*/
- error = xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED,
- XFS_ILOCK_SHARED, &ip);
+ error = xfs_iget(mp, NULL, ino, XFS_IGET_BULKSTAT,
+ XFS_ILOCK_SHARED, &ip, 0);
if (error) {
/*
* EINVAL means the inode cluster doesn't exist anymore.
error = xfs_bulkstat_single(mp, &inlast,
bulkreq.ubuffer, &done);
else /* XFS_IOC_FSBULKSTAT */
- error = xfs_bulkstat(mp, &inlast, &count, xfs_bulkstat_one,
- sizeof(xfs_bstat_t), bulkreq.ubuffer,
- &done);
+ error = xfs_bulkstat(mp, &inlast, &count,
+ (bulkstat_one_pf)xfs_bulkstat_one, NULL,
+ sizeof(xfs_bstat_t), bulkreq.ubuffer,
+ BULKSTAT_FG_QUICK, &done);
if (error)
return -error;
xfs_mount_t *mp,
void __user *arg)
{
- xfs_fsop_geom_t fsgeo;
+ xfs_fsop_geom_v1_t fsgeo;
int error;
- error = xfs_fs_geometry(mp, &fsgeo, 3);
+ error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3);
if (error)
return -error;
- /*
- * Caller should have passed an argument of type
- * xfs_fsop_geom_v1_t. This is a proper subset of the
- * xfs_fsop_geom_t that xfs_fs_geometry() fills in.
- */
- if (copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t)))
+ if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
return -XFS_ERROR(EFAULT);
return 0;
}
xfs_ino_t ino, /* inode number to get data for */
void __user *buffer, /* buffer to place output in */
int ubsize, /* size of buffer */
+ void *private_data, /* my private data */
+ xfs_daddr_t bno, /* starting bno of inode cluster */
int *ubused, /* bytes used by me */
+ void *dibuff, /* on-disk inode buffer */
int *stat) /* BULKSTAT_RV_... */
{
return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
- xfs_bulkstat_one_fmt_compat,
- ubused, stat);
+ xfs_bulkstat_one_fmt_compat, bno,
+ ubused, dibuff, stat);
}
/* copied from xfs_ioctl.c */
int res;
error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer,
- sizeof(compat_xfs_bstat_t), 0, &res);
+ sizeof(compat_xfs_bstat_t),
+ NULL, 0, NULL, NULL, &res);
} else if (cmd == XFS_IOC_FSBULKSTAT_32) {
error = xfs_bulkstat(mp, &inlast, &count,
- xfs_bulkstat_one_compat, sizeof(compat_xfs_bstat_t),
- bulkreq.ubuffer, &done);
+ xfs_bulkstat_one_compat, NULL,
+ sizeof(compat_xfs_bstat_t), bulkreq.ubuffer,
+ BULKSTAT_FG_QUICK, &done);
} else
error = XFS_ERROR(EINVAL);
if (error)
xfs_ino_t ino, /* inode number to get data for */
void __user *buffer, /* not used */
int ubsize, /* not used */
+ void *private_data, /* not used */
+ xfs_daddr_t bno, /* starting block of inode cluster */
int *ubused, /* not used */
+ void *dip, /* on-disk inode pointer (not used) */
int *res) /* result code value */
{
xfs_inode_t *ip;
* the case in all other instances. It's OK that we do this because
* quotacheck is done only at mount time.
*/
- if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip))) {
+ if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip, bno))) {
*res = BULKSTAT_RV_NOTHING;
return error;
}
* Iterate thru all the inodes in the file system,
* adjusting the corresponding dquot counters in core.
*/
- error = xfs_bulkstat(mp, &lastino, &count,
- xfs_qm_dqusage_adjust,
- structsz, NULL, &done);
- if (error)
+ if ((error = xfs_bulkstat(mp, &lastino, &count,
+ xfs_qm_dqusage_adjust, NULL,
+ structsz, NULL, BULKSTAT_FG_IGET, &done)))
break;
- } while (!done);
+ } while (! done);
/*
* We've made all the changes that we need to make incore.
mp->m_sb.sb_uquotino != NULLFSINO) {
ASSERT(mp->m_sb.sb_uquotino > 0);
if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
- 0, 0, &uip)))
+ 0, 0, &uip, 0)))
return XFS_ERROR(error);
}
if (XFS_IS_OQUOTA_ON(mp) &&
mp->m_sb.sb_gquotino != NULLFSINO) {
ASSERT(mp->m_sb.sb_gquotino > 0);
if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
- 0, 0, &gip))) {
+ 0, 0, &gip, 0))) {
if (uip)
IRELE(uip);
return XFS_ERROR(error);
}
if ((flags & XFS_DQ_USER) && mp->m_sb.sb_uquotino != NULLFSINO) {
- error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &qip);
+ error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &qip, 0);
if (!error) {
error = xfs_truncate_file(mp, qip);
IRELE(qip);
if ((flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) &&
mp->m_sb.sb_gquotino != NULLFSINO) {
- error2 = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip);
+ error2 = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip, 0);
if (!error2) {
error2 = xfs_truncate_file(mp, qip);
IRELE(qip);
}
if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
- 0, 0, &uip) == 0)
+ 0, 0, &uip, 0) == 0)
tempuqip = B_TRUE;
}
if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) {
if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
- 0, 0, &gip) == 0)
+ 0, 0, &gip, 0) == 0)
tempgqip = B_TRUE;
}
if (uip) {
xfs_ino_t ino, /* inode number to get data for */
void __user *buffer, /* not used */
int ubsize, /* not used */
+ void *private_data, /* not used */
+ xfs_daddr_t bno, /* starting block of inode cluster */
int *ubused, /* not used */
+ void *dip, /* not used */
int *res) /* bulkstat result code */
{
xfs_inode_t *ip;
ipreleased = B_FALSE;
again:
lock_flags = XFS_ILOCK_SHARED;
- if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip))) {
+ if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip, bno))) {
*res = BULKSTAT_RV_NOTHING;
return (error);
}
* Iterate thru all the inodes in the file system,
* adjusting the corresponding dquot counters
*/
- error = xfs_bulkstat(mp, &lastino, &count,
- xfs_qm_internalqcheck_adjust,
- 0, NULL, &done);
- if (error) {
- cmn_err(CE_DEBUG, "Bulkstat returned error 0x%x", error);
+ if ((error = xfs_bulkstat(mp, &lastino, &count,
+ xfs_qm_internalqcheck_adjust, NULL,
+ 0, NULL, BULKSTAT_FG_IGET, &done))) {
break;
}
- } while (!done);
-
+ } while (! done);
+ if (error) {
+ cmn_err(CE_DEBUG, "Bulkstat returned error 0x%x", error);
+ }
cmn_err(CE_DEBUG, "Checking results against system dquots");
for (i = 0; i < qmtest_hashmask; i++) {
h1 = &qmtest_udqtab[i];
xfs_fsop_geom_t *geo,
int new_version)
{
-
- memset(geo, 0, sizeof(*geo));
-
geo->blocksize = mp->m_sb.sb_blocksize;
geo->rtextsize = mp->m_sb.sb_rextsize;
geo->agblocks = mp->m_sb.sb_agblocks;
return error;
}
-STATIC int
-xfs_imap_lookup(
- struct xfs_mount *mp,
- struct xfs_trans *tp,
- xfs_agnumber_t agno,
- xfs_agino_t agino,
- xfs_agblock_t agbno,
- xfs_agblock_t *chunk_agbno,
- xfs_agblock_t *offset_agbno,
- int flags)
-{
- struct xfs_inobt_rec_incore rec;
- struct xfs_btree_cur *cur;
- struct xfs_buf *agbp;
- int error;
- int i;
-
- down_read(&mp->m_peraglock);
- error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
- up_read(&mp->m_peraglock);
- if (error) {
- xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
- "xfs_ialloc_read_agi() returned "
- "error %d, agno %d",
- error, agno);
- return error;
- }
-
- /*
- * Lookup the inode record for the given agino. If the record cannot be
- * found, then it's an invalid inode number and we should abort. Once
- * we have a record, we need to ensure it contains the inode number
- * we are looking up.
- */
- cur = xfs_inobt_init_cursor(mp, tp, agbp, agno);
- error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
- if (!error) {
- if (i)
- error = xfs_inobt_get_rec(cur, &rec, &i);
- if (!error && i == 0)
- error = EINVAL;
- }
-
- xfs_trans_brelse(tp, agbp);
- xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
- if (error)
- return error;
-
- /* check that the returned record contains the required inode */
- if (rec.ir_startino > agino ||
- rec.ir_startino + XFS_IALLOC_INODES(mp) <= agino)
- return EINVAL;
-
- /* for untrusted inodes check it is allocated first */
- if ((flags & XFS_IGET_UNTRUSTED) &&
- (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino)))
- return EINVAL;
-
- *chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino);
- *offset_agbno = agbno - *chunk_agbno;
- return 0;
-}
-
/*
* Return the location of the inode in imap, for mapping it into a buffer.
*/
if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks ||
ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
#ifdef DEBUG
- /*
- * Don't output diagnostic information for untrusted inodes
- * as they can be invalid without implying corruption.
- */
- if (flags & XFS_IGET_UNTRUSTED)
+ /* no diagnostics for bulkstat, ino comes from userspace */
+ if (flags & XFS_IGET_BULKSTAT)
return XFS_ERROR(EINVAL);
if (agno >= mp->m_sb.sb_agcount) {
xfs_fs_cmn_err(CE_ALERT, mp,
return XFS_ERROR(EINVAL);
}
- blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog;
-
- /*
- * For bulkstat and handle lookups, we have an untrusted inode number
- * that we have to verify is valid. We cannot do this just by reading
- * the inode buffer as it may have been unlinked and removed leaving
- * inodes in stale state on disk. Hence we have to do a btree lookup
- * in all cases where an untrusted inode number is passed.
- */
- if (flags & XFS_IGET_UNTRUSTED) {
- error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
- &chunk_agbno, &offset_agbno, flags);
- if (error)
- return error;
- goto out_map;
- }
-
/*
* If the inode cluster size is the same as the blocksize or
* smaller we get to the buffer by simple arithmetics.
return 0;
}
+ blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog;
+
+ /*
+ * If we get a block number passed from bulkstat we can use it to
+ * find the buffer easily.
+ */
+ if (imap->im_blkno) {
+ offset = XFS_INO_TO_OFFSET(mp, ino);
+ ASSERT(offset < mp->m_sb.sb_inopblock);
+
+ cluster_agbno = xfs_daddr_to_agbno(mp, imap->im_blkno);
+ offset += (agbno - cluster_agbno) * mp->m_sb.sb_inopblock;
+
+ imap->im_len = XFS_FSB_TO_BB(mp, blks_per_cluster);
+ imap->im_boffset = (ushort)(offset << mp->m_sb.sb_inodelog);
+ return 0;
+ }
+
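Worked illustration (assumed example numbers, not part of the patch): with 256-byte inodes in 4 KiB blocks, sb_inopblock is 16 and sb_inodelog is 8; for an inode at index 3 within a block that lies one block past the cluster start, offset = 3 + 1 * 16 = 19, so im_boffset = 19 << 8 = 4864 bytes into the cluster buffer.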
/*
* If the inode chunks are aligned then use simple maths to
* find the location. Otherwise we have to do a btree
offset_agbno = agbno & mp->m_inoalign_mask;
chunk_agbno = agbno - offset_agbno;
} else {
- error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
- &chunk_agbno, &offset_agbno, flags);
+ xfs_btree_cur_t *cur; /* inode btree cursor */
+ xfs_inobt_rec_incore_t chunk_rec;
+ xfs_buf_t *agbp; /* agi buffer */
+ int i; /* temp state */
+
+ down_read(&mp->m_peraglock);
+ error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
+ up_read(&mp->m_peraglock);
+ if (error) {
+ xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
+ "xfs_ialloc_read_agi() returned "
+ "error %d, agno %d",
+ error, agno);
+ return error;
+ }
+
+ cur = xfs_inobt_init_cursor(mp, tp, agbp, agno);
+ error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
+ if (error) {
+ xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
+ "xfs_inobt_lookup() failed");
+ goto error0;
+ }
+
+ error = xfs_inobt_get_rec(cur, &chunk_rec, &i);
+ if (error) {
+ xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
+ "xfs_inobt_get_rec() failed");
+ goto error0;
+ }
+ if (i == 0) {
+#ifdef DEBUG
+ xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
+ "xfs_inobt_get_rec() failed");
+#endif /* DEBUG */
+ error = XFS_ERROR(EINVAL);
+ }
+ error0:
+ xfs_trans_brelse(tp, agbp);
+ xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
if (error)
return error;
+ chunk_agbno = XFS_AGINO_TO_AGBNO(mp, chunk_rec.ir_startino);
+ offset_agbno = agbno - chunk_agbno;
}
-out_map:
ASSERT(agbno >= chunk_agbno);
cluster_agbno = chunk_agbno +
((offset_agbno / blks_per_cluster) * blks_per_cluster);
xfs_trans_t *tp,
xfs_ino_t ino,
struct xfs_inode **ipp,
+ xfs_daddr_t bno,
int flags,
int lock_flags) __releases(pag->pag_ici_lock)
{
if (!ip)
return ENOMEM;
- error = xfs_iread(mp, tp, ip, flags);
+ error = xfs_iread(mp, tp, ip, bno, flags);
if (error)
goto out_destroy;
* within the file system for the inode being requested.
* lock_flags -- flags indicating how to lock the inode. See the comment
* for xfs_ilock() for a list of valid values.
+ * bno -- the block number starting the buffer containing the inode,
+ * if known (as by bulkstat), else 0.
*/
int
xfs_iget(
xfs_ino_t ino,
uint flags,
uint lock_flags,
- xfs_inode_t **ipp)
+ xfs_inode_t **ipp,
+ xfs_daddr_t bno)
{
xfs_inode_t *ip;
int error;
read_unlock(&pag->pag_ici_lock);
XFS_STATS_INC(xs_ig_missed);
- error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
+ error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, bno,
flags, lock_flags);
if (error)
goto out_error_or_again;
if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
XFS_ERRTAG_ITOBP_INOTOBP,
XFS_RANDOM_ITOBP_INOTOBP))) {
- if (iget_flags & XFS_IGET_UNTRUSTED) {
+ if (iget_flags & XFS_IGET_BULKSTAT) {
xfs_trans_brelse(tp, bp);
return XFS_ERROR(EINVAL);
}
xfs_mount_t *mp,
xfs_trans_t *tp,
xfs_inode_t *ip,
+ xfs_daddr_t bno,
uint iget_flags)
{
xfs_buf_t *bp;
/*
* Fill in the location information in the in-core inode.
*/
+ ip->i_imap.im_blkno = bno;
error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
if (error)
return error;
+ ASSERT(bno == 0 || bno == ip->i_imap.im_blkno);
/*
* Get pointers to the on-disk inode and the buffer containing it.
* xfs_iget.c prototypes.
*/
int xfs_iget(struct xfs_mount *, struct xfs_trans *, xfs_ino_t,
- uint, uint, xfs_inode_t **);
+ uint, uint, xfs_inode_t **, xfs_daddr_t);
void xfs_iput(xfs_inode_t *, uint);
void xfs_iput_new(xfs_inode_t *, uint);
void xfs_ilock(xfs_inode_t *, uint);
* Flags for xfs_iget()
*/
#define XFS_IGET_CREATE 0x1
-#define XFS_IGET_UNTRUSTED 0x2
+#define XFS_IGET_BULKSTAT 0x2
int xfs_inotobp(struct xfs_mount *, struct xfs_trans *,
xfs_ino_t, struct xfs_dinode **,
struct xfs_inode *, struct xfs_dinode **,
struct xfs_buf **, uint);
int xfs_iread(struct xfs_mount *, struct xfs_trans *,
- struct xfs_inode *, uint);
+ struct xfs_inode *, xfs_daddr_t, uint);
void xfs_dinode_to_disk(struct xfs_dinode *,
struct xfs_icdinode *);
void xfs_idestroy_fork(struct xfs_inode *, int);
(ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino)));
}
-/*
- * Return stat information for one inode.
- * Return 0 if ok, else errno.
- */
-int
-xfs_bulkstat_one_int(
- struct xfs_mount *mp, /* mount point for filesystem */
- xfs_ino_t ino, /* inode to get data for */
- void __user *buffer, /* buffer to place output in */
- int ubsize, /* size of buffer */
- bulkstat_one_fmt_pf formatter, /* formatter, copy to user */
- int *ubused, /* bytes used by me */
- int *stat) /* BULKSTAT_RV_... */
+STATIC int
+xfs_bulkstat_one_iget(
+ xfs_mount_t *mp, /* mount point for filesystem */
+ xfs_ino_t ino, /* inode number to get data for */
+ xfs_daddr_t bno, /* starting bno of inode cluster */
+ xfs_bstat_t *buf, /* return buffer */
+ int *stat) /* BULKSTAT_RV_... */
{
- struct xfs_icdinode *dic; /* dinode core info pointer */
- struct xfs_inode *ip; /* incore inode pointer */
- struct inode *inode;
- struct xfs_bstat *buf; /* return buffer */
- int error = 0; /* error value */
-
- *stat = BULKSTAT_RV_NOTHING;
-
- if (!buffer || xfs_internal_inum(mp, ino))
- return XFS_ERROR(EINVAL);
-
- buf = kmem_alloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL);
- if (!buf)
- return XFS_ERROR(ENOMEM);
+ xfs_icdinode_t *dic; /* dinode core info pointer */
+ xfs_inode_t *ip; /* incore inode pointer */
+ struct inode *inode;
+ int error;
error = xfs_iget(mp, NULL, ino,
- XFS_IGET_UNTRUSTED, XFS_ILOCK_SHARED, &ip);
+ XFS_IGET_BULKSTAT, XFS_ILOCK_SHARED, &ip, bno);
if (error) {
*stat = BULKSTAT_RV_NOTHING;
- goto out_free;
+ return error;
}
ASSERT(ip != NULL);
buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
break;
}
+
xfs_iput(ip, XFS_ILOCK_SHARED);
+ return error;
+}
- error = formatter(buffer, ubsize, ubused, buf);
+STATIC void
+xfs_bulkstat_one_dinode(
+ xfs_mount_t *mp, /* mount point for filesystem */
+ xfs_ino_t ino, /* inode number to get data for */
+ xfs_dinode_t *dic, /* dinode inode pointer */
+ xfs_bstat_t *buf) /* return buffer */
+{
+ /*
+ * The inode format changed when we moved the link count and
+ * made it 32 bits long. If this is an old format inode,
+ * convert it in memory to look like a new one. If it gets
+ * flushed to disk we will convert back before flushing or
+ * logging it. We zero out the new projid field and the old link
+ * count field. We'll handle clearing the pad field (the remains
+ * of the old uuid field) when we actually convert the inode to
+ * the new format. We don't change the version number so that we
+ * can distinguish this from a real new format inode.
+ */
+ if (dic->di_version == 1) {
+ buf->bs_nlink = be16_to_cpu(dic->di_onlink);
+ buf->bs_projid = 0;
+ } else {
+ buf->bs_nlink = be32_to_cpu(dic->di_nlink);
+ buf->bs_projid = be16_to_cpu(dic->di_projid);
+ }
- if (!error)
- *stat = BULKSTAT_RV_DIDONE;
+ buf->bs_ino = ino;
+ buf->bs_mode = be16_to_cpu(dic->di_mode);
+ buf->bs_uid = be32_to_cpu(dic->di_uid);
+ buf->bs_gid = be32_to_cpu(dic->di_gid);
+ buf->bs_size = be64_to_cpu(dic->di_size);
+ buf->bs_atime.tv_sec = be32_to_cpu(dic->di_atime.t_sec);
+ buf->bs_atime.tv_nsec = be32_to_cpu(dic->di_atime.t_nsec);
+ buf->bs_mtime.tv_sec = be32_to_cpu(dic->di_mtime.t_sec);
+ buf->bs_mtime.tv_nsec = be32_to_cpu(dic->di_mtime.t_nsec);
+ buf->bs_ctime.tv_sec = be32_to_cpu(dic->di_ctime.t_sec);
+ buf->bs_ctime.tv_nsec = be32_to_cpu(dic->di_ctime.t_nsec);
+ buf->bs_xflags = xfs_dic2xflags(dic);
+ buf->bs_extsize = be32_to_cpu(dic->di_extsize) << mp->m_sb.sb_blocklog;
+ buf->bs_extents = be32_to_cpu(dic->di_nextents);
+ buf->bs_gen = be32_to_cpu(dic->di_gen);
+ memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
+ buf->bs_dmevmask = be32_to_cpu(dic->di_dmevmask);
+ buf->bs_dmstate = be16_to_cpu(dic->di_dmstate);
+ buf->bs_aextents = be16_to_cpu(dic->di_anextents);
- out_free:
- kmem_free(buf);
- return error;
+ switch (dic->di_format) {
+ case XFS_DINODE_FMT_DEV:
+ buf->bs_rdev = xfs_dinode_get_rdev(dic);
+ buf->bs_blksize = BLKDEV_IOSIZE;
+ buf->bs_blocks = 0;
+ break;
+ case XFS_DINODE_FMT_LOCAL:
+ case XFS_DINODE_FMT_UUID:
+ buf->bs_rdev = 0;
+ buf->bs_blksize = mp->m_sb.sb_blocksize;
+ buf->bs_blocks = 0;
+ break;
+ case XFS_DINODE_FMT_EXTENTS:
+ case XFS_DINODE_FMT_BTREE:
+ buf->bs_rdev = 0;
+ buf->bs_blksize = mp->m_sb.sb_blocksize;
+ buf->bs_blocks = be64_to_cpu(dic->di_nblocks);
+ break;
+ }
}
/* Return 0 on success or positive error */
return 0;
}
+/*
+ * Return stat information for one inode.
+ * Return 0 if ok, else errno.
+ */
+int /* error status */
+xfs_bulkstat_one_int(
+ xfs_mount_t *mp, /* mount point for filesystem */
+ xfs_ino_t ino, /* inode number to get data for */
+ void __user *buffer, /* buffer to place output in */
+ int ubsize, /* size of buffer */
+ bulkstat_one_fmt_pf formatter, /* formatter, copy to user */
+ xfs_daddr_t bno, /* starting bno of inode cluster */
+ int *ubused, /* bytes used by me */
+ void *dibuff, /* on-disk inode buffer */
+ int *stat) /* BULKSTAT_RV_... */
+{
+ xfs_bstat_t *buf; /* return buffer */
+ int error = 0; /* error value */
+ xfs_dinode_t *dip; /* dinode inode pointer */
+
+ dip = (xfs_dinode_t *)dibuff;
+ *stat = BULKSTAT_RV_NOTHING;
+
+ if (!buffer || xfs_internal_inum(mp, ino))
+ return XFS_ERROR(EINVAL);
+
+ buf = kmem_alloc(sizeof(*buf), KM_SLEEP);
+
+ if (dip == NULL) {
+ /* We're not being passed a pointer to a dinode. This happens
+ * if BULKSTAT_FG_IGET is selected. Do the iget.
+ */
+ error = xfs_bulkstat_one_iget(mp, ino, bno, buf, stat);
+ if (error)
+ goto out_free;
+ } else {
+ xfs_bulkstat_one_dinode(mp, ino, dip, buf);
+ }
+
+ error = formatter(buffer, ubsize, ubused, buf);
+ if (error)
+ goto out_free;
+
+ *stat = BULKSTAT_RV_DIDONE;
+
+ out_free:
+ kmem_free(buf);
+ return error;
+}
+
int
xfs_bulkstat_one(
xfs_mount_t *mp, /* mount point for filesystem */
xfs_ino_t ino, /* inode number to get data for */
void __user *buffer, /* buffer to place output in */
int ubsize, /* size of buffer */
+ void *private_data, /* my private data */
+ xfs_daddr_t bno, /* starting bno of inode cluster */
int *ubused, /* bytes used by me */
+ void *dibuff, /* on-disk inode buffer */
int *stat) /* BULKSTAT_RV_... */
{
return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
- xfs_bulkstat_one_fmt, ubused, stat);
+ xfs_bulkstat_one_fmt, bno,
+ ubused, dibuff, stat);
+}
+
+/*
+ * Test to see whether we can use the ondisk inode directly, based
+ * on the given bulkstat flags, filling in dipp accordingly.
+ * Returns zero if the inode is dodgy.
+ */
+STATIC int
+xfs_bulkstat_use_dinode(
+ xfs_mount_t *mp,
+ int flags,
+ xfs_buf_t *bp,
+ int clustidx,
+ xfs_dinode_t **dipp)
+{
+ xfs_dinode_t *dip;
+ unsigned int aformat;
+
+ *dipp = NULL;
+ if (!bp || (flags & BULKSTAT_FG_IGET))
+ return 1;
+ dip = (xfs_dinode_t *)
+ xfs_buf_offset(bp, clustidx << mp->m_sb.sb_inodelog);
+ /*
+ * Check the buffer containing the on-disk inode for di_mode == 0.
+ * This is to prevent xfs_bulkstat from picking up just reclaimed
+ * inodes that have their in-core state initialized but not flushed
+ * to disk yet. This is a temporary hack that would require a proper
+ * fix in the future.
+ */
+ if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
+ !XFS_DINODE_GOOD_VERSION(dip->di_version) ||
+ !dip->di_mode)
+ return 0;
+ if (flags & BULKSTAT_FG_QUICK) {
+ *dipp = dip;
+ return 1;
+ }
+ /* BULKSTAT_FG_INLINE: if attr fork is local, or not there, use it */
+ aformat = dip->di_aformat;
+ if ((XFS_DFORK_Q(dip) == 0) ||
+ (aformat == XFS_DINODE_FMT_LOCAL) ||
+ (aformat == XFS_DINODE_FMT_EXTENTS && !dip->di_anextents)) {
+ *dipp = dip;
+ return 1;
+ }
+ return 1;
}
#define XFS_BULKSTAT_UBLEFT(ubleft) ((ubleft) >= statstruct_size)
xfs_ino_t *lastinop, /* last inode returned */
int *ubcountp, /* size of buffer/count returned */
bulkstat_one_pf formatter, /* func that'd fill a single buf */
+ void *private_data,/* private data for formatter */
size_t statstruct_size, /* sizeof struct filling */
char __user *ubuffer, /* buffer with inode stats */
+ int flags, /* defined in xfs_itable.h */
int *done) /* 1 if there are more stats to get */
{
xfs_agblock_t agbno=0;/* allocation group block number */
int ubelem; /* spaces used in user's buffer */
int ubused; /* bytes used by formatter */
xfs_buf_t *bp; /* ptr to on-disk inode cluster buf */
+ xfs_dinode_t *dip; /* ptr into bp for specific inode */
/*
* Get the last inode value, see if there's nothing to do.
*/
ino = (xfs_ino_t)*lastinop;
lastino = ino;
+ dip = NULL;
agno = XFS_INO_TO_AGNO(mp, ino);
agino = XFS_INO_TO_AGINO(mp, ino);
if (agno >= mp->m_sb.sb_agcount ||
irbp->ir_startino) +
((chunkidx & nimask) >>
mp->m_sb.sb_inopblog);
+
+ if (flags & (BULKSTAT_FG_QUICK |
+ BULKSTAT_FG_INLINE)) {
+ int offset;
+
+ ino = XFS_AGINO_TO_INO(mp, agno,
+ agino);
+ bno = XFS_AGB_TO_DADDR(mp, agno,
+ agbno);
+
+ /*
+ * Get the inode cluster buffer
+ */
+ if (bp)
+ xfs_buf_relse(bp);
+
+ error = xfs_inotobp(mp, NULL, ino, &dip,
+ &bp, &offset,
+ XFS_IGET_BULKSTAT);
+
+ if (!error)
+ clustidx = offset / mp->m_sb.sb_inodesize;
+ if (XFS_TEST_ERROR(error != 0,
+ mp, XFS_ERRTAG_BULKSTAT_READ_CHUNK,
+ XFS_RANDOM_BULKSTAT_READ_CHUNK)) {
+ bp = NULL;
+ ubleft = 0;
+ rval = error;
+ break;
+ }
+ }
}
ino = XFS_AGINO_TO_INO(mp, agno, agino);
bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
* when the chunk is used up.
*/
irbp->ir_freecount++;
+ if (!xfs_bulkstat_use_dinode(mp, flags, bp,
+ clustidx, &dip)) {
+ lastino = ino;
+ continue;
+ }
+ /*
+ * If we need to do an iget, cannot hold bp.
+ * Drop it, until starting the next cluster.
+ */
+ if ((flags & BULKSTAT_FG_INLINE) && !dip) {
+ if (bp)
+ xfs_buf_relse(bp);
+ bp = NULL;
+ }
/*
* Get the inode and fill in a single buffer.
+ * BULKSTAT_FG_QUICK uses dip to fill it in.
+ * BULKSTAT_FG_IGET uses igets.
+ * BULKSTAT_FG_INLINE uses dip if we have an
+ * inline attr fork, else igets.
+ * See: xfs_bulkstat_one & xfs_dm_bulkstat_one.
+ * This is also used to count inodes/blks, etc
+ * in xfs_qm_quotacheck.
*/
ubused = statstruct_size;
- error = formatter(mp, ino, ubufp, ubleft,
- &ubused, &fmterror);
+ error = formatter(mp, ino, ubufp,
+ ubleft, private_data,
+ bno, &ubused, dip, &fmterror);
if (fmterror == BULKSTAT_RV_NOTHING) {
if (error && error != ENOENT &&
error != EINVAL) {
*/
ino = (xfs_ino_t)*lastinop;
- error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t), 0, &res);
+ error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t),
+ NULL, 0, NULL, NULL, &res);
if (error) {
/*
* Special case way failed, do it the "long" way
(*lastinop)--;
count = 1;
if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one,
- sizeof(xfs_bstat_t), buffer, done))
+ NULL, sizeof(xfs_bstat_t), buffer,
+ BULKSTAT_FG_IGET, done))
return error;
if (count == 0 || (xfs_ino_t)*lastinop != ino)
return error == EFSCORRUPTED ?
xfs_ino_t ino,
void __user *buffer,
int ubsize,
+ void *private_data,
+ xfs_daddr_t bno,
int *ubused,
+ void *dip,
int *stat);
/*
#define BULKSTAT_RV_DIDONE 1
#define BULKSTAT_RV_GIVEUP 2
+/*
+ * Values for bulkstat flag argument.
+ */
+#define BULKSTAT_FG_IGET 0x1 /* Go through the buffer cache */
+#define BULKSTAT_FG_QUICK 0x2 /* No iget, walk the dinode cluster */
+#define BULKSTAT_FG_INLINE 0x4 /* No iget if inline attrs */
+
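A hedged sketch of how a caller selects one of these access methods; the call shape matches the xfs_bulkstat() prototype just below, and the cursor variables (lastino, count, ubuffer, done) are assumed to be set up by the caller:

	/*
	 * Illustrative only: walk every inode through the buffer cache
	 * (iget), as the quotacheck path in this patch does.
	 */
	error = xfs_bulkstat(mp, &lastino, &count, xfs_bulkstat_one,
			     NULL,			/* private_data */
			     sizeof(xfs_bstat_t),	/* statstruct_size */
			     ubuffer, BULKSTAT_FG_IGET, &done);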
/*
* Return stat information in bulk (by-inode) for the filesystem.
*/
xfs_ino_t *lastino, /* last inode returned */
int *count, /* size of buffer/count returned */
bulkstat_one_pf formatter, /* func that'd fill a single buf */
+ void *private_data, /* private data for formatter */
size_t statstruct_size,/* sizeof struct that we're filling */
char __user *ubuffer,/* buffer with inode stats */
+ int flags, /* flag to control access method */
int *done); /* 1 if there are more stats to get */
int
void __user *buffer,
int ubsize,
bulkstat_one_fmt_pf formatter,
+ xfs_daddr_t bno,
int *ubused,
+ void *dibuff,
int *stat);
int
xfs_ino_t ino,
void __user *buffer,
int ubsize,
+ void *private_data,
+ xfs_daddr_t bno,
int *ubused,
+ void *dibuff,
int *stat);
typedef int (*inumbers_fmt_pf)(
int error;
ino = XFS_AGINO_TO_INO(mp, agno, agino);
- error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
+ error = xfs_iget(mp, NULL, ino, 0, 0, &ip, 0);
if (error)
goto fail;
* Get and sanity-check the root inode.
* Save the pointer to it in the mount structure.
*/
- error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip);
+ error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip, 0);
if (error) {
cmn_err(CE_WARN, "XFS: failed to read root inode");
goto out_log_dealloc;
sbp = &mp->m_sb;
if (sbp->sb_rbmino == NULLFSINO)
return 0;
- error = xfs_iget(mp, NULL, sbp->sb_rbmino, 0, 0, &mp->m_rbmip);
+ error = xfs_iget(mp, NULL, sbp->sb_rbmino, 0, 0, &mp->m_rbmip, 0);
if (error)
return error;
ASSERT(mp->m_rbmip != NULL);
ASSERT(sbp->sb_rsumino != NULLFSINO);
- error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip);
+ error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip, 0);
if (error) {
IRELE(mp->m_rbmip);
return error;
{
int error;
- error = xfs_iget(mp, tp, ino, flags, lock_flags, ipp);
+ error = xfs_iget(mp, tp, ino, flags, lock_flags, ipp, 0);
if (!error && tp)
xfs_trans_ijoin(tp, *ipp, lock_flags);
return error;
if (error)
goto out;
- error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
+ error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp, 0);
if (error)
goto out_free_name;
struct pci_controller *hose;
#endif
struct drm_sg_mem *sg; /**< Scatter gather memory */
- unsigned int num_crtcs; /**< Number of CRTCs on this device */
+ int num_crtcs; /**< Number of CRTCs on this device */
void *dev_private; /**< device private data */
void *mm_private;
struct address_space *dev_mapping;
{0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
{0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
{0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
+ {0x1002, 0x4243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
{0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
{0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
{0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
* structure of raw payloads passed to add_key() or instantiate key
*/
struct rxrpc_key_data_v1 {
+ u32 kif_version; /* 1 */
u16 security_index;
u16 ticket_length;
u32 expiry; /* time_t */
char buf[BINPRM_BUF_SIZE];
#ifdef CONFIG_MMU
struct vm_area_struct *vma;
- unsigned long vma_pages;
#else
# define MAX_ARG_PAGES 32
struct page *page[MAX_ARG_PAGES];
unsigned long loader, exec;
};
-extern void acct_arg_size(struct linux_binprm *bprm, unsigned long pages);
-extern struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
- int write);
-
#define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
#define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
extern void __put_cred(struct cred *);
extern void exit_creds(struct task_struct *);
extern int copy_creds(struct task_struct *, unsigned long);
-extern const struct cred *get_task_cred(struct task_struct *);
extern struct cred *cred_alloc_blank(void);
extern struct cred *prepare_creds(void);
extern struct cred *prepare_exec_creds(void);
#define __task_cred(task) \
((const struct cred *)(rcu_dereference((task)->real_cred)))
+/**
+ * get_task_cred - Get another task's objective credentials
+ * @task: The task to query
+ *
+ * Get the objective credentials of a task, pinning them so that they can't go
+ * away. Accessing a task's credentials directly is not permitted.
+ *
+ * The caller must make sure task doesn't go away, either by holding a ref on
+ * task or by holding tasklist_lock to prevent it from being unlinked.
+ */
+#define get_task_cred(task) \
+({ \
+ struct cred *__cred; \
+ rcu_read_lock(); \
+ __cred = (struct cred *) __task_cred((task)); \
+ get_cred(__cred); \
+ rcu_read_unlock(); \
+ __cred; \
+})
+
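A minimal usage sketch (illustrative; assumes the caller already pins the task as described above):

	const struct cred *cred;

	cred = get_task_cred(task);	/* reference taken under RCU */
	/* ... inspect cred->uid, cred->euid, etc. ... */
	put_cred(cred);			/* drop the reference when done */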
/**
* get_current_cred - Get the current task's subjective credentials
*
extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
-extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
static inline int task_curr_ret_stack(struct task_struct *t)
{
static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
-static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
static inline int task_curr_ret_stack(struct task_struct *tsk)
{
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
-#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
-
#ifndef PREEMPT_ACTIVE
#define PREEMPT_ACTIVE_BITS 1
#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
/*
* Are we doing bottom half or hardware interrupt processing?
* Are we in a softirq context? Interrupt context?
- * in_softirq - Are we currently processing softirq or have bh disabled?
- * in_serving_softirq - Are we currently processing softirq?
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
/*
* Are we in NMI context?
struct task_struct;
-#if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING)
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
static inline void account_system_vtime(struct task_struct *tsk)
{
}
-#else
-extern void account_system_vtime(struct task_struct *tsk);
#endif
#if defined(CONFIG_NO_HZ)
/* block-ack parameters */
#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
-#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0
+#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0
#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
struct list_head k_list;
void (*get)(struct klist_node *);
void (*put)(struct klist_node *);
-} __attribute__ ((aligned (sizeof(void *))));
+} __attribute__ ((aligned (4)));
#define KLIST_INIT(_name, _get, _put) \
{ .k_lock = __SPIN_LOCK_UNLOCKED(_name.k_lock), \
unsigned int f_min;
unsigned int f_max;
u32 ocr_avail;
- struct notifier_block pm_notify;
#define MMC_VDD_165_195 0x00000080 /* VDD voltage 1.65 - 1.95 */
#define MMC_VDD_20_21 0x00000100 /* VDD voltage 2.0 ~ 2.1 */
/* Only used with MMC_CAP_DISABLE */
int enabled; /* host is enabled */
- int rescan_disable; /* disable card detection */
int nesting_cnt; /* "enable" nesting count */
int en_dis_recurs; /* detect recursion */
unsigned int disable_delay; /* disable delay in msecs */
int mmc_host_enable(struct mmc_host *host);
int mmc_host_disable(struct mmc_host *host);
int mmc_host_lazy_disable(struct mmc_host *host);
-int mmc_pm_notify(struct notifier_block *notify_block, unsigned long, void *);
static inline void mmc_set_disable_delay(struct mmc_host *host,
unsigned int disable_delay)
return 0;
return dev->ethtool_ops->get_flags(dev);
}
-
-#define MODULE_ALIAS_NETDEV(device) \
- MODULE_ALIAS("netdev-" device)
-
#endif /* __KERNEL__ */
#endif /* _LINUX_NETDEVICE_H */
*/
extern struct pid *find_get_pid(int nr);
extern struct pid *find_ge_pid(int nr, struct pid_namespace *);
-int next_pidmap(struct pid_namespace *pid_ns, unsigned int last);
+int next_pidmap(struct pid_namespace *pid_ns, int last);
extern struct pid *alloc_pid(struct pid_namespace *ns);
extern void free_pid(struct pid *pid);
uid_t uid;
struct user_namespace *user_ns;
+#ifdef CONFIG_USER_SCHED
+ struct task_group *tg;
+#ifdef CONFIG_SYSFS
+ struct kobject kobj;
+ struct delayed_work work;
+#endif
+#endif
+
#ifdef CONFIG_PERF_EVENTS
atomic_long_t locked_vm;
#endif
* single CPU.
*/
unsigned int cpu_power;
- unsigned int group_weight;
/*
* The CPUs this group covers.
struct task_struct *task);
#ifdef CONFIG_FAIR_GROUP_SCHED
- void (*task_move_group) (struct task_struct *p, int on_rq);
+ void (*moved_group) (struct task_struct *p, int on_rq);
#endif
};
/*
* Per process flags
*/
-#define PF_KSOFTIRQD 0x00000001 /* I am ksoftirqd */
+#define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */
+ /* Not implemented yet, only for 486*/
#define PF_STARTING 0x00000002 /* being created */
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
*/
extern unsigned long long cpu_clock(int cpu);
-#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-/*
- * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
- * The reason for this explicit opt-in is not to have perf penalty with
- * slow sched_clocks.
- */
-extern void enable_sched_clock_irqtime(void);
-extern void disable_sched_clock_irqtime(void);
-#else
-static inline void enable_sched_clock_irqtime(void) {}
-static inline void disable_sched_clock_irqtime(void) {}
-#endif
-
extern unsigned long long
task_sched_runtime(struct task_struct *task);
extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
extern int __cond_resched_softirq(void);
-#define cond_resched_softirq() ({ \
- __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
- __cond_resched_softirq(); \
+#define cond_resched_softirq() ({ \
+ __might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET); \
+ __cond_resched_softirq(); \
})
/*
extern void normalize_rt_tasks(void);
-#ifdef CONFIG_CGROUP_SCHED
+#ifdef CONFIG_GROUP_SCHED
extern struct task_group init_task_group;
+#ifdef CONFIG_USER_SCHED
+extern struct task_group root_task_group;
+extern void set_tg_uid(struct user_struct *user);
+#endif
extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
struct usb_serial_port *port,
unsigned int ch);
extern int usb_serial_handle_break(struct usb_serial_port *port);
-extern void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
- struct tty_struct *tty,
- unsigned int status);
extern int usb_serial_bus_register(struct usb_serial_driver *device);
extern void unix_notinflight(struct file *fp);
extern void unix_gc(void);
extern void wait_for_unix_gc(void);
-extern struct sock *unix_get_socket(struct file *filp);
#define UNIX_HASH_SIZE 256
spinlock_t lock;
unsigned int gc_candidate : 1;
unsigned int gc_maybe_cycle : 1;
- unsigned char recursion_level;
wait_queue_head_t peer_wait;
};
#define unix_sk(__sk) ((struct unix_sock *)__sk)
*/
void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb);
-/*
- * The TX headroom reserved by mac80211 for its own tx_status functions.
- * This is enough for the radiotap header.
- */
-#define IEEE80211_TX_STATUS_HEADROOM 13
-
/**
* ieee80211_tx_status - transmit status callback
*
SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */
SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
SCTP_CMD_SEND_MSG, /* Send the whole use message */
- SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
SCTP_CMD_LAST
} sctp_verb_t;
}
static inline int scsi_device_enclosure(struct scsi_device *sdev)
{
- return sdev->inquiry ? (sdev->inquiry[6] & (1<<6)) : 1;
+ return sdev->inquiry[6] & (1<<6);
}
static inline int scsi_device_protection(struct scsi_device *sdev)
/* platform domain */
#define SND_SOC_DAPM_INPUT(wname) \
{ .id = snd_soc_dapm_input, .name = wname, .kcontrols = NULL, \
- .num_kcontrols = 0, .reg = SND_SOC_NOPM }
+ .num_kcontrols = 0}
#define SND_SOC_DAPM_OUTPUT(wname) \
{ .id = snd_soc_dapm_output, .name = wname, .kcontrols = NULL, \
- .num_kcontrols = 0, .reg = SND_SOC_NOPM }
+ .num_kcontrols = 0}
#define SND_SOC_DAPM_MIC(wname, wevent) \
{ .id = snd_soc_dapm_mic, .name = wname, .kcontrols = NULL, \
- .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
+ .num_kcontrols = 0, .event = wevent, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD}
#define SND_SOC_DAPM_HP(wname, wevent) \
{ .id = snd_soc_dapm_hp, .name = wname, .kcontrols = NULL, \
- .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
+ .num_kcontrols = 0, .event = wevent, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD}
#define SND_SOC_DAPM_SPK(wname, wevent) \
{ .id = snd_soc_dapm_spk, .name = wname, .kcontrols = NULL, \
- .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
+ .num_kcontrols = 0, .event = wevent, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD}
#define SND_SOC_DAPM_LINE(wname, wevent) \
{ .id = snd_soc_dapm_line, .name = wname, .kcontrols = NULL, \
- .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
+ .num_kcontrols = 0, .event = wevent, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD}
/* path domain */
/* events that are pre and post DAPM */
#define SND_SOC_DAPM_PRE(wname, wevent) \
{ .id = snd_soc_dapm_pre, .name = wname, .kcontrols = NULL, \
- .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
+ .num_kcontrols = 0, .event = wevent, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD}
#define SND_SOC_DAPM_POST(wname, wevent) \
{ .id = snd_soc_dapm_post, .name = wname, .kcontrols = NULL, \
- .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
+ .num_kcontrols = 0, .event = wevent, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD}
/* stream domain */
config HAVE_UNSTABLE_SCHED_CLOCK
bool
+config GROUP_SCHED
+ bool "Group CPU scheduler"
+ depends on EXPERIMENTAL
+ default n
+ help
+ This feature lets the CPU scheduler recognize task groups and control CPU
+ bandwidth allocation to such task groups.
+ In order to create a group from an arbitrary set of processes, use
+ CONFIG_CGROUPS. (See Control Group support.)
+
+config FAIR_GROUP_SCHED
+ bool "Group scheduling for SCHED_OTHER"
+ depends on GROUP_SCHED
+ default GROUP_SCHED
+
+config RT_GROUP_SCHED
+ bool "Group scheduling for SCHED_RR/FIFO"
+ depends on EXPERIMENTAL
+ depends on GROUP_SCHED
+ default n
+ help
+ This feature lets you explicitly allocate real CPU bandwidth
+ to users or control groups (depending on the "Basis for grouping tasks"
+ setting below). If enabled, it will also make it impossible to
+ schedule realtime tasks for non-root users until you allocate
+ realtime bandwidth for them.
+ See Documentation/scheduler/sched-rt-group.txt for more information.
+
+choice
+ depends on GROUP_SCHED
+ prompt "Basis for grouping tasks"
+ default USER_SCHED
+
+config USER_SCHED
+ bool "user id"
+ help
+ This option will choose userid as the basis for grouping
+ tasks, thus providing equal CPU bandwidth to each user.
+
+config CGROUP_SCHED
+ bool "Control groups"
+ depends on CGROUPS
+ help
+ This option allows you to create arbitrary task groups
+ using the "cgroup" pseudo filesystem and control
+ the cpu bandwidth allocated to each such task group.
+ Refer to Documentation/cgroups/cgroups.txt for more
+ information on "cgroup" pseudo filesystem.
+
+endchoice
+
menuconfig CGROUPS
boolean "Control Group support"
help
Now, memory usage of swap_cgroup is 2 bytes per entry. If swap page
size is 4096bytes, 512k per 1Gbytes of swap.
-menuconfig CGROUP_SCHED
- bool "Group CPU scheduler"
- depends on EXPERIMENTAL && CGROUPS
- default n
- help
- This feature lets CPU scheduler recognize task groups and control CPU
- bandwidth allocation to such task groups. It uses cgroups to group
- tasks.
-
-if CGROUP_SCHED
-config FAIR_GROUP_SCHED
- bool "Group scheduling for SCHED_OTHER"
- depends on CGROUP_SCHED
- default CGROUP_SCHED
-
-config RT_GROUP_SCHED
- bool "Group scheduling for SCHED_RR/FIFO"
- depends on EXPERIMENTAL
- depends on CGROUP_SCHED
- default n
- help
- This feature lets you explicitly allocate real CPU bandwidth
- to task groups. If enabled, it will also make it impossible to
- schedule realtime tasks for non-root users until you allocate
- realtime bandwidth for them.
- See Documentation/scheduler/sched-rt-group.txt for more information.
-
-endif #CGROUP_SCHED
-
endif # CGROUPS
config MM_OWNER
per cpu and per node queues.
config SLUB
- depends on BROKEN || NUMA || !DISCONTIGMEM
bool "SLUB (Unqueued Allocator)"
help
SLUB is a slab allocator that minimizes cache line usage
pre_start = 0;
read_current_timer(&start);
start_jiffies = jiffies;
- while (time_before_eq(jiffies, start_jiffies + 1)) {
+ while (jiffies <= (start_jiffies + 1)) {
pre_start = start;
read_current_timer(&start);
}
pre_end = 0;
end = post_start;
- while (time_before_eq(jiffies, start_jiffies + 1 +
- DELAY_CALIBRATION_TICKS)) {
+ while (jiffies <=
+ (start_jiffies + 1 + DELAY_CALIBRATION_TICKS)) {
pre_end = end;
read_current_timer(&end);
}
* gcc-3.4 accidentally inlines this function, so use noinline.
*/
-static __initdata DECLARE_COMPLETION(kthreadd_done);
-
static noinline void __init_refok rest_init(void)
__releases(kernel_lock)
{
int pid;
rcu_scheduler_starting();
- /*
- * We need to spawn init first so that it obtains pid-1, however
- * the init task will end up wanting to create kthreads, which, if
- * we schedule it before we create kthreadd, will OOPS.
- */
kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
numa_default_policy();
pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
- complete(&kthreadd_done);
unlock_kernel();
/*
static int __init kernel_init(void * unused)
{
- /*
- * Wait until kthreadd is all set-up.
- */
- wait_for_completion(&kthreadd_done);
lock_kernel();
/*
#include <linux/syscalls.h>
#include <linux/pid_namespace.h>
#include <asm/uaccess.h>
+#include "cred-internals.h"
/*
* Leveraged for setting/resetting capabilities
return -ENODEV;
trialcs = alloc_trial_cpuset(cs);
- if (!trialcs) {
- retval = -ENOMEM;
- goto out;
- }
+ if (!trialcs)
+ return -ENOMEM;
switch (cft->private) {
case FILE_CPULIST:
}
free_trial_cpuset(trialcs);
-out:
cgroup_unlock();
return retval;
}
--- /dev/null
+/* Internal credentials stuff
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+/*
+ * user.c
+ */
+static inline void sched_switch_user(struct task_struct *p)
+{
+#ifdef CONFIG_USER_SCHED
+ sched_move_task(p);
+#endif /* CONFIG_USER_SCHED */
+}
+
#include <linux/init_task.h>
#include <linux/security.h>
#include <linux/cn_proc.h>
+#include "cred-internals.h"
#if 0
#define kdebug(FMT, ...) \
}
}
-/**
- * get_task_cred - Get another task's objective credentials
- * @task: The task to query
- *
- * Get the objective credentials of a task, pinning them so that they can't go
- * away. Accessing a task's credentials directly is not permitted.
- *
- * The caller must also make sure task doesn't get deleted, either by holding a
- * ref on task or by holding tasklist_lock to prevent it from being unlinked.
- */
-const struct cred *get_task_cred(struct task_struct *task)
-{
- const struct cred *cred;
-
- rcu_read_lock();
-
- do {
- cred = __task_cred((task));
- BUG_ON(!cred);
- } while (!atomic_inc_not_zero(&((struct cred *)cred)->usage));
-
- rcu_read_unlock();
- return cred;
-}
-
/*
* Allocate blank credentials, such that the credentials can be filled in at a
* later date without risk of ENOMEM.
#endif
atomic_set(&new->usage, 1);
-#ifdef CONFIG_DEBUG_CREDENTIALS
- new->magic = CRED_MAGIC;
-#endif
if (security_cred_alloc_blank(new, GFP_KERNEL) < 0)
goto error;
+#ifdef CONFIG_DEBUG_CREDENTIALS
+ new->magic = CRED_MAGIC;
+#endif
return new;
error:
atomic_dec(&old->user->processes);
alter_cred_subscribers(old, -2);
+ sched_switch_user(task);
+
/* send notifications */
if (new->uid != old->uid ||
new->euid != old->euid ||
validate_creds(old);
*new = *old;
- atomic_set(&new->usage, 1);
- set_cred_subscribers(new, 0);
get_uid(new->user);
get_group_info(new->group_info);
if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
goto error;
+ atomic_set(&new->usage, 1);
+ set_cred_subscribers(new, 0);
put_cred(old);
validate_creds(new);
return new;
if (cred->magic != CRED_MAGIC)
return true;
#ifdef CONFIG_SECURITY_SELINUX
- /*
- * cred->security == NULL if security_cred_alloc_blank() or
- * security_prepare_creds() returned an error.
- */
- if (selinux_is_enabled() && cred->security) {
+ if (selinux_is_enabled()) {
if ((unsigned long) cred->security < PAGE_SIZE)
return true;
if ((*(u32 *)cred->security & 0xffffff00) ==
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
+#include "cred-internals.h"
static void exit_mm(struct task_struct * tsk);
if (retval)
kfree(action);
-#ifdef CONFIG_DEBUG_SHIRQ_FIXME
+#ifdef CONFIG_DEBUG_SHIRQ
if (irqflags & IRQF_SHARED) {
/*
* It's a shared IRQ -- the driver ought to be prepared for it
switch (remcom_in_buffer[1]) {
case 's':
case 'f':
- if (memcmp(remcom_in_buffer + 2, "ThreadInfo", 10))
+ if (memcmp(remcom_in_buffer + 2, "ThreadInfo", 10)) {
+ error_packet(remcom_out_buffer, -EINVAL);
break;
+ }
i = 0;
remcom_out_buffer[0] = 'm';
pack_threadid(remcom_out_buffer + 2, thref);
break;
case 'T':
- if (memcmp(remcom_in_buffer + 1, "ThreadExtraInfo,", 16))
+ if (memcmp(remcom_in_buffer + 1, "ThreadExtraInfo,", 16)) {
+ error_packet(remcom_out_buffer, -EINVAL);
break;
-
+ }
ks->threadid = 0;
ptr = remcom_in_buffer + 17;
kgdb_hex2long(&ptr, &ks->threadid);
goto group_exit;
}
+ /* create the /sys/kernel/uids/ directory */
+ error = uids_sysfs_init();
+ if (error)
+ goto notes_exit;
+
return 0;
+notes_exit:
+ if (notes_size > 0)
+ sysfs_remove_bin_file(kernel_kobj, ¬es_attr);
group_exit:
sysfs_remove_group(kernel_kobj, &kernel_attr_group);
kset_exit:
return sysctl_perf_event_paranoid > 1;
}
-/* Minimum for 128 pages + 1 for the user control page */
-int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */
+int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
/*
* max perf event sample rate
return -1;
}
-int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
+int next_pidmap(struct pid_namespace *pid_ns, int last)
{
int offset;
struct pidmap *map, *end;
- if (last >= PID_MAX_LIMIT)
- return -1;
-
offset = (last + 1) & BITS_PER_PAGE_MASK;
map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
end = &pid_ns->pidmap[PIDMAP_ENTRIES];
swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
unsigned int nr_pages, unsigned int nr_highmem)
{
+ int error = 0;
+
if (nr_highmem > 0) {
- if (get_highmem_buffer(PG_ANY))
+ error = get_highmem_buffer(PG_ANY);
+ if (error)
goto err_out;
if (nr_highmem > alloc_highmem) {
nr_highmem -= alloc_highmem;
err_out:
swsusp_free();
- return -ENOMEM;
+ return error;
}
asmlinkage int swsusp_save(void)
child->exit_code = data;
dead = __ptrace_detach(current, child);
if (!child->exit_state)
- wake_up_state(child, TASK_TRACED | TASK_STOPPED);
+ wake_up_process(child);
}
write_unlock_irq(&tasklist_lock);
*/
static DEFINE_MUTEX(sched_domains_mutex);
-#ifdef CONFIG_CGROUP_SCHED
+#ifdef CONFIG_GROUP_SCHED
#include <linux/cgroup.h>
/* task group related information */
struct task_group {
+#ifdef CONFIG_CGROUP_SCHED
struct cgroup_subsys_state css;
+#endif
+
+#ifdef CONFIG_USER_SCHED
+ uid_t uid;
+#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
/* schedulable entities of this group on each cpu */
struct list_head children;
};
+#ifdef CONFIG_USER_SCHED
+
+/* Helper function to pass uid information to create_sched_user() */
+void set_tg_uid(struct user_struct *user)
+{
+ user->tg->uid = user->uid;
+}
+
+/*
+ * Root task group.
+ * Every UID task group (including init_task_group aka UID-0) will
+ * be a child to this group.
+ */
+struct task_group root_task_group;
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+/* Default task group's sched entity on each cpu */
+static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
+/* Default task group's cfs_rq on each cpu */
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
+#endif /* CONFIG_FAIR_GROUP_SCHED */
+
+#ifdef CONFIG_RT_GROUP_SCHED
+static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
+#endif /* CONFIG_RT_GROUP_SCHED */
+#else /* !CONFIG_USER_SCHED */
#define root_task_group init_task_group
+#endif /* CONFIG_USER_SCHED */
/* task_group_lock serializes add/remove of task groups and also changes to
* a task group's cpu shares.
}
#endif
+#ifdef CONFIG_USER_SCHED
+# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
+#else /* !CONFIG_USER_SCHED */
# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
+#endif /* CONFIG_USER_SCHED */
/*
* A weight of 0 or 1 can cause arithmetics problems.
{
struct task_group *tg;
-#ifdef CONFIG_CGROUP_SCHED
+#ifdef CONFIG_USER_SCHED
+ rcu_read_lock();
+ tg = __task_cred(p)->user->tg;
+ rcu_read_unlock();
+#elif defined(CONFIG_CGROUP_SCHED)
tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
struct task_group, css);
#else
return NULL;
}
-#endif /* CONFIG_CGROUP_SCHED */
+#endif /* CONFIG_GROUP_SCHED */
/* CFS-related fields in a runqueue */
struct cfs_rq {
struct mm_struct *prev_mm;
u64 clock;
- u64 clock_task;
atomic_t nr_iowait;
struct root_domain *rd;
struct sched_domain *sd;
- unsigned long cpu_power;
-
unsigned char idle_at_tick;
/* For active balancing */
int post_schedule;
u64 avg_idle;
#endif
-#ifdef CONFIG_IRQ_TIME_ACCOUNTING
- u64 prev_irq_time;
-#endif
-
/* calc_load related fields */
unsigned long calc_load_update;
long calc_load_active;
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
+static inline
+void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
+{
+ rq->curr->sched_class->check_preempt_curr(rq, p, flags);
+}
static inline int cpu_of(struct rq *rq)
{
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
#define raw_rq() (&__raw_get_cpu_var(runqueues))
-static u64 irq_time_cpu(int cpu);
-static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time);
-
inline void update_rq_clock(struct rq *rq)
{
- int cpu = cpu_of(rq);
- u64 irq_time;
-
rq->clock = sched_clock_cpu(cpu_of(rq));
- irq_time = irq_time_cpu(cpu);
- if (rq->clock - irq_time > rq->clock_task)
- rq->clock_task = rq->clock - irq_time;
-
- sched_irq_time_avg_update(rq, irq_time);
}
/*
static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
}
-
-static void sched_avg_update(struct rq *rq)
-{
-}
#endif /* CONFIG_SMP */
#if BITS_PER_LONG == 32
return max(rq->cpu_load[type-1], total);
}
+static struct sched_group *group_of(int cpu)
+{
+ struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
+
+ if (!sd)
+ return NULL;
+
+ return sd->groups;
+}
+
static unsigned long power_of(int cpu)
{
- return cpu_rq(cpu)->cpu_power;
+ struct sched_group *group = group_of(cpu);
+
+ if (!group)
+ return SCHED_LOAD_SCALE;
+
+ return group->cpu_power;
}
static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
#endif
}
-#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-
-/*
- * There are no locks covering percpu hardirq/softirq time.
- * They are only modified in account_system_vtime, on corresponding CPU
- * with interrupts disabled. So, writes are safe.
- * They are read and saved off onto struct rq in update_rq_clock().
- * This may result in other CPU reading this CPU's irq time and can
- * race with irq/account_system_vtime on this CPU. We would either get old
- * or new value (or semi updated value on 32 bit) with a side effect of
- * accounting a slice of irq time to wrong task when irq is in progress
- * while we read rq->clock. That is a worthy compromise in place of having
- * locks on each irq in account_system_time.
- */
-static DEFINE_PER_CPU(u64, cpu_hardirq_time);
-static DEFINE_PER_CPU(u64, cpu_softirq_time);
-
-static DEFINE_PER_CPU(u64, irq_start_time);
-static int sched_clock_irqtime;
-
-void enable_sched_clock_irqtime(void)
-{
- sched_clock_irqtime = 1;
-}
-
-void disable_sched_clock_irqtime(void)
-{
- sched_clock_irqtime = 0;
-}
-
-static u64 irq_time_cpu(int cpu)
-{
- if (!sched_clock_irqtime)
- return 0;
-
- return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
-}
-
-void account_system_vtime(struct task_struct *curr)
-{
- unsigned long flags;
- int cpu;
- u64 now, delta;
-
- if (!sched_clock_irqtime)
- return;
-
- local_irq_save(flags);
-
- cpu = smp_processor_id();
- now = sched_clock_cpu(cpu);
- delta = now - per_cpu(irq_start_time, cpu);
- per_cpu(irq_start_time, cpu) = now;
- /*
- * We do not account for softirq time from ksoftirqd here.
- * We want to continue accounting softirq time to ksoftirqd thread
- * in that case, so as not to confuse scheduler with a special task
- * that do not consume any time, but still wants to run.
- */
- if (hardirq_count())
- per_cpu(cpu_hardirq_time, cpu) += delta;
- else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD))
- per_cpu(cpu_softirq_time, cpu) += delta;
-
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(account_system_vtime);
-
-static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time)
-{
- if (sched_clock_irqtime && sched_feat(NONIRQ_POWER)) {
- u64 delta_irq = curr_irq_time - rq->prev_irq_time;
- rq->prev_irq_time = curr_irq_time;
- sched_rt_avg_update(rq, delta_irq);
- }
-}
-
-#else
-
-static u64 irq_time_cpu(int cpu)
-{
- return 0;
-}
-
-static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) { }
-
-#endif
-
#include "sched_stats.h"
#include "sched_idletask.c"
#include "sched_fair.c"
static void set_load_weight(struct task_struct *p)
{
if (task_has_rt_policy(p)) {
- p->se.load.weight = 0;
- p->se.load.inv_weight = WMULT_CONST;
+ p->se.load.weight = prio_to_weight[0] * 2;
+ p->se.load.inv_weight = prio_to_wmult[0] >> 1;
return;
}
if (p->sched_class != &fair_sched_class)
return 0;
- if (unlikely(p->policy == SCHED_IDLE))
- return 0;
-
/*
* Buddy candidates are cache hot:
*/
preempt_enable();
}
-static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
-{
- const struct sched_class *class;
-
- if (p->sched_class == rq->curr->sched_class) {
- rq->curr->sched_class->check_preempt_curr(rq, p, flags);
- } else {
- for_each_class(class) {
- if (class == rq->curr->sched_class)
- break;
- if (class == p->sched_class) {
- resched_task(rq->curr);
- break;
- }
- }
- }
-}
-
#ifdef CONFIG_SMP
/*
* ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
this_rq->calc_load_update += LOAD_FREQ;
calc_load_account_active(this_rq);
}
-
- sched_avg_update(this_rq);
}
#ifdef CONFIG_SMP
* 2) too many balance attempts have failed.
*/
- tsk_cache_hot = task_hot(p, rq->clock_task, sd);
+ tsk_cache_hot = task_hot(p, rq->clock, sd);
if (!tsk_cache_hot ||
sd->nr_balance_failed > sd->cache_nice_tries) {
#ifdef CONFIG_SCHEDSTATS
unsigned long this_load;
unsigned long this_load_per_task;
unsigned long this_nr_running;
- unsigned long this_has_capacity;
- unsigned int this_idle_cpus;
/* Statistics of the busiest group */
- unsigned int busiest_idle_cpus;
unsigned long max_load;
unsigned long busiest_load_per_task;
unsigned long busiest_nr_running;
unsigned long busiest_group_capacity;
- unsigned long busiest_has_capacity;
- unsigned int busiest_group_weight;
int group_imb; /* Is there imbalance in this sd */
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
unsigned long sum_nr_running; /* Nr tasks running in the group */
unsigned long sum_weighted_load; /* Weighted load of group's tasks */
unsigned long group_capacity;
- unsigned long idle_cpus;
- unsigned long group_weight;
int group_imb; /* Is there an imbalance in the group ? */
- int group_has_capacity; /* Is there extra capacity in the group? */
};
/**
struct rq *rq = cpu_rq(cpu);
u64 total, available;
- total = sched_avg_period() + (rq->clock - rq->age_stamp);
+ sched_avg_update(rq);
- if (unlikely(total < rq->rt_avg)) {
- /* Ensures that power won't end up being negative */
- available = 0;
- } else {
- available = total - rq->rt_avg;
- }
+ total = sched_avg_period() + (rq->clock - rq->age_stamp);
+ available = total - rq->rt_avg;
if (unlikely((s64)total < SCHED_LOAD_SCALE))
total = SCHED_LOAD_SCALE;
if (!power)
power = 1;
- cpu_rq(cpu)->cpu_power = power;
sdg->cpu_power = power;
}
int local_group, const struct cpumask *cpus,
int *balance, struct sg_lb_stats *sgs)
{
- unsigned long load, max_cpu_load, min_cpu_load, max_nr_running;
+ unsigned long load, max_cpu_load, min_cpu_load;
int i;
unsigned int balance_cpu = -1, first_idle_cpu = 0;
unsigned long avg_load_per_task = 0;
/* Tally up the load of all CPUs in the group */
max_cpu_load = 0;
min_cpu_load = ~0UL;
- max_nr_running = 0;
for_each_cpu_and(i, sched_group_cpus(group), cpus) {
struct rq *rq = cpu_rq(i);
load = target_load(i, load_idx);
} else {
load = source_load(i, load_idx);
- if (load > max_cpu_load) {
+ if (load > max_cpu_load)
max_cpu_load = load;
- max_nr_running = rq->nr_running;
- }
if (min_cpu_load > load)
min_cpu_load = load;
}
sgs->group_load += load;
sgs->sum_nr_running += rq->nr_running;
sgs->sum_weighted_load += weighted_cpuload(i);
- if (idle_cpu(i))
- sgs->idle_cpus++;
+
}
/*
if (sgs->sum_nr_running)
avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
- if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task && max_nr_running > 1)
+ if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
sgs->group_imb = 1;
- sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
- sgs->group_weight = group->group_weight;
-
- if (sgs->group_capacity > sgs->sum_nr_running)
- sgs->group_has_capacity = 1;
+ sgs->group_capacity =
+ DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
}
/**
/*
* In case the child domain prefers tasks go to siblings
* first, lower the group capacity to one so that we'll try
- * and move all the excess tasks away. We lower the capacity
- * of a group only if the local group has the capacity to fit
- * these excess tasks, i.e. nr_running < group_capacity. The
- * extra check prevents the case where you always pull from the
- * heaviest group when it is already under-utilized (possible
- * with a large weight task outweighs the tasks on the system).
+ * and move all the excess tasks away.
*/
- if (prefer_sibling && !local_group && sds->this_has_capacity)
+ if (prefer_sibling)
sgs.group_capacity = min(sgs.group_capacity, 1UL);
if (local_group) {
sds->this = group;
sds->this_nr_running = sgs.sum_nr_running;
sds->this_load_per_task = sgs.sum_weighted_load;
- sds->this_has_capacity = sgs.group_has_capacity;
- sds->this_idle_cpus = sgs.idle_cpus;
} else if (sgs.avg_load > sds->max_load &&
(sgs.sum_nr_running > sgs.group_capacity ||
sgs.group_imb)) {
sds->max_load = sgs.avg_load;
sds->busiest = group;
sds->busiest_nr_running = sgs.sum_nr_running;
- sds->busiest_idle_cpus = sgs.idle_cpus;
sds->busiest_group_capacity = sgs.group_capacity;
- sds->busiest_group_weight = sgs.group_weight;
sds->busiest_load_per_task = sgs.sum_weighted_load;
- sds->busiest_has_capacity = sgs.group_has_capacity;
sds->group_imb = sgs.group_imb;
}
return fix_small_imbalance(sds, this_cpu, imbalance);
}
-
/******* find_busiest_group() helpers end here *********************/
/**
 * 4) This group is more busy than the avg busyness at this
* sched_domain.
* 5) The imbalance is within the specified limit.
- *
- * Note: when doing newidle balance, if the local group has excess
- * capacity (i.e. nr_running < group_capacity) and the busiest group
- * does not have any capacity, we force a load balance to pull tasks
- * to the local group. In this case, we skip past checks 3, 4 and 5.
*/
if (balance && !(*balance))
goto ret;
if (!sds.busiest || sds.busiest_nr_running == 0)
goto out_balanced;
- /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
- if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
- !sds.busiest_has_capacity)
- goto force_balance;
-
if (sds.this_load >= sds.max_load)
goto out_balanced;
if (sds.this_load >= sds.avg_load)
goto out_balanced;
- /*
- * In the CPU_NEWLY_IDLE, use imbalance_pct to be conservative.
- * And to check for busy balance use !idle_cpu instead of
- * CPU_NOT_IDLE. This is because HT siblings will use CPU_NOT_IDLE
- * even when they are idle.
- */
- if (idle == CPU_NEWLY_IDLE || !idle_cpu(this_cpu)) {
- if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
- goto out_balanced;
- } else {
- /*
- * This cpu is idle. If the busiest group load doesn't
- * have more tasks than the number of available cpu's and
- * there is no imbalance between this and busiest group
- * wrt to idle cpu's, it is balanced.
- */
- if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
- sds.busiest_nr_running <= sds.busiest_group_weight)
- goto out_balanced;
- }
+ if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
+ goto out_balanced;
-force_balance:
/* Looks like there is an imbalance. Compute it */
calculate_imbalance(&sds, this_cpu, imbalance);
return sds.busiest;
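
The restored out_balanced test above compares the busiest and local group loads scaled by imbalance_pct. A worked sketch, assuming the common default imbalance_pct of 125 for this domain level (the load values are examples):

#include <stdio.h>

static int out_balanced(unsigned long this_load, unsigned long max_load,
			unsigned int imbalance_pct)
{
	/* mirrors: 100 * sds.max_load <= sd->imbalance_pct * sds.this_load */
	return 100 * max_load <= imbalance_pct * this_load;
}

int main(void)
{
	printf("%d\n", out_balanced(800,  950, 125));  /* 1: within 25%, treated as balanced */
	printf("%d\n", out_balanced(800, 1100, 125));  /* 0: more than 25% busier, imbalance */
	return 0;
}
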
if (!ld_moved) {
schedstat_inc(sd, lb_failed[idle]);
- /*
- * Increment the failure counter only on periodic balance.
- * We do not want newidle balance, which can be very
- * frequent, pollute the failure counter causing
- * excessive cache_hot migrations and active balances.
- */
- if (idle != CPU_NEWLY_IDLE)
- sd->nr_balance_failed++;
+ sd->nr_balance_failed++;
if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
if (task_current(rq, p)) {
update_rq_clock(rq);
- ns = rq->clock_task - p->se.exec_start;
+ ns = rq->clock - p->se.exec_start;
if ((s64)ns < 0)
ns = 0;
}
tmp = cputime_to_cputime64(cputime);
if (hardirq_count() - hardirq_offset)
cpustat->irq = cputime64_add(cpustat->irq, tmp);
- else if (in_serving_softirq())
+ else if (softirq_count())
cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
else
cpustat->system = cputime64_add(cpustat->system, tmp);
idle->se.exec_start = sched_clock();
cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
- /*
- * We're having a chicken and egg problem, even though we are
- * holding rq->lock, the cpu isn't yet set to this cpu so the
- * lockdep check in task_group() will fail.
- *
- * Similar case to sched_fork(). / Alternatively we could
- * use task_rq_lock() here and obtain the other rq->lock.
- *
- * Silence PROVE_RCU
- */
- rcu_read_lock();
__set_task_cpu(idle, cpu);
- rcu_read_unlock();
rq->curr = rq->idle = idle;
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
* The idle tasks have their own, simple scheduling class:
*/
idle->sched_class = &idle_sched_class;
- ftrace_graph_init_idle_task(idle, cpu);
+ ftrace_graph_init_task(idle);
}
/*
if (cpu != group_first_cpu(sd->groups))
return;
- sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
-
child = sd->child;
sd->groups->cpu_power = 0;
#ifdef CONFIG_RT_GROUP_SCHED
alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
+#ifdef CONFIG_USER_SCHED
+ alloc_size *= 2;
+#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
alloc_size += num_possible_cpus() * cpumask_size();
#endif
init_task_group.cfs_rq = (struct cfs_rq **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
+#ifdef CONFIG_USER_SCHED
+ root_task_group.se = (struct sched_entity **)ptr;
+ ptr += nr_cpu_ids * sizeof(void **);
+
+ root_task_group.cfs_rq = (struct cfs_rq **)ptr;
+ ptr += nr_cpu_ids * sizeof(void **);
+#endif /* CONFIG_USER_SCHED */
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
init_task_group.rt_se = (struct sched_rt_entity **)ptr;
init_task_group.rt_rq = (struct rt_rq **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
+#ifdef CONFIG_USER_SCHED
+ root_task_group.rt_se = (struct sched_rt_entity **)ptr;
+ ptr += nr_cpu_ids * sizeof(void **);
+
+ root_task_group.rt_rq = (struct rt_rq **)ptr;
+ ptr += nr_cpu_ids * sizeof(void **);
+#endif /* CONFIG_USER_SCHED */
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_CPUMASK_OFFSTACK
for_each_possible_cpu(i) {
#ifdef CONFIG_RT_GROUP_SCHED
init_rt_bandwidth(&init_task_group.rt_bandwidth,
global_rt_period(), global_rt_runtime());
+#ifdef CONFIG_USER_SCHED
+ init_rt_bandwidth(&root_task_group.rt_bandwidth,
+ global_rt_period(), RUNTIME_INF);
+#endif /* CONFIG_USER_SCHED */
#endif /* CONFIG_RT_GROUP_SCHED */
-#ifdef CONFIG_CGROUP_SCHED
+#ifdef CONFIG_GROUP_SCHED
list_add(&init_task_group.list, &task_groups);
INIT_LIST_HEAD(&init_task_group.children);
-#endif /* CONFIG_CGROUP_SCHED */
+#ifdef CONFIG_USER_SCHED
+ INIT_LIST_HEAD(&root_task_group.children);
+ init_task_group.parent = &root_task_group;
+ list_add(&init_task_group.siblings, &root_task_group.children);
+#endif /* CONFIG_USER_SCHED */
+#endif /* CONFIG_GROUP_SCHED */
#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
* directly in rq->cfs (i.e init_task_group->se[] = NULL).
*/
init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
+#elif defined CONFIG_USER_SCHED
+ root_task_group.shares = NICE_0_LOAD;
+ init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, 0, NULL);
+ /*
+ * In case of task-groups formed through the user id of tasks,
+ * init_task_group represents tasks belonging to root user.
+ * Hence it forms a sibling of all subsequent groups formed.
+ * In this case, init_task_group gets only a fraction of overall
+ * system cpu resource, based on the weight assigned to root
+ * user's cpu share (INIT_TASK_GROUP_LOAD). This is accomplished
+ * by letting tasks of init_task_group sit in a separate cfs_rq
+ * (init_tg_cfs_rq) and having one entity represent this group of
+ * tasks in rq->cfs (i.e init_task_group->se[] != NULL).
+ */
+ init_tg_cfs_entry(&init_task_group,
+ &per_cpu(init_tg_cfs_rq, i),
+ &per_cpu(init_sched_entity, i), i, 1,
+ root_task_group.se[i]);
+
#endif
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_SMP
rq->sd = NULL;
rq->rd = NULL;
- rq->cpu_power = SCHED_LOAD_SCALE;
rq->post_schedule = 0;
rq->active_balance = 0;
rq->next_balance = jiffies;
}
#endif /* CONFIG_RT_GROUP_SCHED */
-#ifdef CONFIG_CGROUP_SCHED
+#ifdef CONFIG_GROUP_SCHED
static void free_sched_group(struct task_group *tg)
{
free_fair_sched_group(tg);
if (unlikely(running))
tsk->sched_class->put_prev_task(rq, tsk);
+ set_task_rq(tsk, task_cpu(tsk));
+
#ifdef CONFIG_FAIR_GROUP_SCHED
- if (tsk->sched_class->task_move_group)
- tsk->sched_class->task_move_group(tsk, on_rq);
- else
+ if (tsk->sched_class->moved_group)
+ tsk->sched_class->moved_group(tsk, on_rq);
#endif
- set_task_rq(tsk, task_cpu(tsk));
if (unlikely(running))
tsk->sched_class->set_curr_task(rq);
task_rq_unlock(rq, &flags);
}
-#endif /* CONFIG_CGROUP_SCHED */
+#endif /* CONFIG_GROUP_SCHED */
#ifdef CONFIG_FAIR_GROUP_SCHED
static void __set_se_shares(struct sched_entity *se, unsigned long shares)
runtime = d->rt_runtime;
}
+#ifdef CONFIG_USER_SCHED
+ if (tg == &root_task_group) {
+ period = global_rt_period();
+ runtime = global_rt_runtime();
+ }
+#endif
+
/*
* Cannot have more runtime than the period.
*/
task_group_path(tg, path, sizeof(path));
SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
+#elif defined(CONFIG_USER_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
+ {
+ uid_t uid = cfs_rq->tg->uid;
+ SEQ_printf(m, "\ncfs_rq[%d] for UID: %u\n", cpu, uid);
+ }
#else
SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
static void update_curr(struct cfs_rq *cfs_rq)
{
struct sched_entity *curr = cfs_rq->curr;
- u64 now = rq_of(cfs_rq)->clock_task;
+ u64 now = rq_of(cfs_rq)->clock;
unsigned long delta_exec;
if (unlikely(!curr))
/*
* We are starting a new run period:
*/
- se->exec_start = rq_of(cfs_rq)->clock_task;
+ se->exec_start = rq_of(cfs_rq)->clock;
}
/**************************************************
unsigned long this_load, load;
int idx, this_cpu, prev_cpu;
unsigned long tl_per_task;
+ unsigned int imbalance;
struct task_group *tg;
unsigned long weight;
int balanced;
tg = task_group(p);
weight = p->se.load.weight;
+ imbalance = 100 + (sd->imbalance_pct - 100) / 2;
+
/*
* In low-load situations, where prev_cpu is idle and this_cpu is idle
* due to the sync cause above having dropped this_load to 0, we'll
* Otherwise check if either cpus are near enough in load to allow this
* task to be woken on this_cpu.
*/
- if (this_load) {
- unsigned long this_eff_load, prev_eff_load;
-
- this_eff_load = 100;
- this_eff_load *= power_of(prev_cpu);
- this_eff_load *= this_load +
- effective_load(tg, this_cpu, weight, weight);
-
- prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
- prev_eff_load *= power_of(this_cpu);
- prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
-
- balanced = this_eff_load <= prev_eff_load;
- } else
- balanced = true;
-
+ balanced = !this_load ||
+ 100*(this_load + effective_load(tg, this_cpu, weight, weight)) <=
+ imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
rcu_read_unlock();
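
The restored wake_affine() condition folds the domain's imbalance_pct into a single scaling factor before comparing the loads of this_cpu and prev_cpu. A sketch with assumed numbers (imbalance_pct of 125, effective_load() contributions already folded into the example load values) showing how much extra load this_cpu may carry while the wakeup still counts as balanced:

#include <stdio.h>

int main(void)
{
	unsigned int imbalance_pct = 125;                          /* assumed domain default */
	unsigned int imbalance = 100 + (imbalance_pct - 100) / 2;  /* = 112 */
	unsigned long this_load = 1100, prev_load = 1000;          /* example effective loads */

	int balanced = !this_load ||
		       100 * this_load <= imbalance * prev_load;

	printf("imbalance=%u balanced=%d\n", imbalance, balanced); /* 112, 1 */
	return 0;
}

So this_cpu may be up to roughly 12% busier than prev_cpu and the task is still woken affine.
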
/*
update_rq_clock(rq);
- if (unlikely(task_cpu(p) != this_cpu)) {
- rcu_read_lock();
+ if (unlikely(task_cpu(p) != this_cpu))
__set_task_cpu(p, this_cpu);
- rcu_read_unlock();
- }
update_curr(cfs_rq);
}
#ifdef CONFIG_FAIR_GROUP_SCHED
-static void task_move_group_fair(struct task_struct *p, int on_rq)
+static void moved_group_fair(struct task_struct *p, int on_rq)
{
- /*
- * If the task was not on the rq at the time of this cgroup movement
- * it must have been asleep, sleeping tasks keep their ->vruntime
- * absolute on their old rq until wakeup (needed for the fair sleeper
- * bonus in place_entity()).
- *
- * If it was on the rq, we've just 'preempted' it, which does convert
- * ->vruntime to a relative base.
- *
- * Make sure both cases convert their relative position when migrating
- * to another cgroup's rq. This does somewhat interfere with the
- * fair sleeper stuff for the first placement, but who cares.
- */
- if (!on_rq)
- p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
- set_task_rq(p, task_cpu(p));
+ struct cfs_rq *cfs_rq = task_cfs_rq(p);
+
+ update_curr(cfs_rq);
if (!on_rq)
- p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
+ place_entity(cfs_rq, &p->se, 1);
}
#endif
.get_rr_interval = get_rr_interval_fair,
#ifdef CONFIG_FAIR_GROUP_SCHED
- .task_move_group = task_move_group_fair,
+ .moved_group = moved_group_fair,
#endif
};
* release the lock. Decreases scheduling overhead.
*/
SCHED_FEAT(OWNER_SPIN, 1)
-
-/*
- * Decrement CPU power based on irq activity
- */
-SCHED_FEAT(NONIRQ_POWER, 1)
if (!task_has_rt_policy(curr))
return;
- delta_exec = rq->clock_task - curr->se.exec_start;
+ delta_exec = rq->clock - curr->se.exec_start;
if (unlikely((s64)delta_exec < 0))
delta_exec = 0;
curr->se.sum_exec_runtime += delta_exec;
account_group_exec_runtime(curr, delta_exec);
- curr->se.exec_start = rq->clock_task;
+ curr->se.exec_start = rq->clock;
cpuacct_charge(curr, delta_exec);
sched_rt_avg_update(rq, delta_exec);
* runqueue. Otherwise simply start this RT task
* on its current runqueue.
*
- * We want to avoid overloading runqueues. If the woken
- * task is a higher priority, then it will stay on this CPU
- * and the lower prio task should be moved to another CPU.
- * Even though this will probably make the lower prio task
- * lose its cache, we do not want to bounce a higher task
- * around just because it gave up its CPU, perhaps for a
- * lock?
- *
- * For equal prio tasks, we just let the scheduler sort it out.
+ * We want to avoid overloading runqueues, even if the woken
+ * RT task is of higher priority than the current RT task.
+ * RT tasks behave differently than other tasks: if one gets
+ * preempted, we try to push it off to another queue. Trying
+ * to keep a preempting RT task on the same cache-hot CPU would
+ * force the running RT task onto a cold CPU, so we waste all
+ * of its cache in the hope of saving some for an RT task that
+ * is just being woken and will probably have a cold cache anyway.
*/
if (unlikely(rt_task(rq->curr)) &&
- (rq->curr->rt.nr_cpus_allowed < 2 ||
- rq->curr->prio < p->prio) &&
(p->rt.nr_cpus_allowed > 1)) {
int cpu = find_lowest_rq(p);
} while (rt_rq);
p = rt_task_of(rt_se);
- p->se.exec_start = rq->clock_task;
+ p->se.exec_start = rq->clock;
return p;
}
if (!task_running(rq, p) &&
!test_tsk_need_resched(rq->curr) &&
has_pushable_tasks(rq) &&
- p->rt.nr_cpus_allowed > 1 &&
- rt_task(rq->curr) &&
- (rq->curr->rt.nr_cpus_allowed < 2 ||
- rq->curr->prio < p->prio))
+ p->rt.nr_cpus_allowed > 1)
push_rt_tasks(rq);
}
{
struct task_struct *p = rq->curr;
- p->se.exec_start = rq->clock_task;
+ p->se.exec_start = rq->clock;
/* The running task is never eligible for pushing */
dequeue_pushable_task(rq, p);
return -EFAULT;
/* Not even root can pretend to send signals from the kernel.
- * Nor can they impersonate a kill()/tgkill(), which adds source info.
- */
- if (info.si_code >= 0 || info.si_code == SI_TKILL) {
- /* We used to allow any < 0 si_code */
- WARN_ON_ONCE(info.si_code < 0);
+ Nor can they impersonate a kill(), which adds source info. */
+ if (info.si_code >= 0)
return -EPERM;
- }
info.si_signo = sig;
/* POSIX.1b doesn't mention process groups. */
return -EINVAL;
/* Not even root can pretend to send signals from the kernel.
- * Nor can they impersonate a kill()/tgkill(), which adds source info.
- */
- if (info->si_code >= 0 || info->si_code == SI_TKILL) {
- /* We used to allow any < 0 si_code */
- WARN_ON_ONCE(info->si_code < 0);
+ Nor can they impersonate a kill(), which adds source info. */
+ if (info->si_code >= 0)
return -EPERM;
- }
info->si_signo = sig;
return do_send_specific(tgid, pid, sig, info);
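
Both variants of the check above reject a non-negative si_code supplied from user space, so legitimately queued signals must arrive with a negative code. A minimal user-space caller that stays on the allowed side of the check (glibc's sigqueue() issues rt_sigqueueinfo() with si_code = SI_QUEUE, which is negative); the signal number and payload here are arbitrary:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	union sigval v = { .sival_int = 42 };

	signal(SIGUSR1, SIG_IGN);            /* keep the default action from killing us */
	if (sigqueue(getpid(), SIGUSR1, v))  /* si_code ends up as SI_QUEUE (-1) */
		perror("sigqueue");
	return 0;
}

Hand-rolling the syscall with si_code >= 0 (or SI_TKILL, in the stricter variant) gets -EPERM instead.
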
list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
int refs;
- /*
- * Since we walk the list without any locks, we might
- * see an entry that was completed, removed from the
- * list and is in the process of being reused.
- *
- * We must check that the cpu is in the cpumask before
- * checking the refs, and both must be set before
- * executing the callback on this cpu.
- */
-
- if (!cpumask_test_cpu(cpu, data->cpumask))
- continue;
-
- smp_rmb();
-
- if (atomic_read(&data->refs) == 0)
- continue;
-
if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
continue;
refs = atomic_dec_return(&data->refs);
WARN_ON(refs < 0);
if (!refs) {
- WARN_ON(!cpumask_empty(data->cpumask));
-
spin_lock(&call_function.lock);
list_del_rcu(&data->csd.list);
spin_unlock(&call_function.lock);
{
struct call_function_data *data;
unsigned long flags;
- int refs, cpu, next_cpu, this_cpu = smp_processor_id();
+ int cpu, next_cpu, this_cpu = smp_processor_id();
/*
* Can deadlock when called with interrupts disabled.
WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
&& !oops_in_progress);
- /* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
+ /* So, what's a CPU they want? Ignoring this one. */
cpu = cpumask_first_and(mask, cpu_online_mask);
if (cpu == this_cpu)
cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
data = &__get_cpu_var(cfd_data);
csd_lock(&data->csd);
- /* This BUG_ON verifies our reuse assertions and can be removed */
- BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
-
- /*
- * The global call function queue list add and delete are protected
- * by a lock, but the list is traversed without any lock, relying
- * on the rcu list add and delete to allow safe concurrent traversal.
- * We reuse the call function data without waiting for any grace
- * period after some other cpu removes it from the global queue.
- * This means a cpu might find our data block as it is being
- * filled out.
- *
- * We hold off the interrupt handler on the other cpu by
- * ordering our writes to the cpu mask vs our setting of the
- * refs counter. We assert only the cpu owning the data block
- * will set a bit in cpumask, and each bit will only be cleared
- * by the subject cpu. Each cpu must first find its bit is
- * set and then check that refs is set indicating the element is
- * ready to be processed, otherwise it must skip the entry.
- *
- * On the previous iteration refs was set to 0 by another cpu.
- * To avoid the use of transitivity, set the counter to 0 here
- * so the wmb will pair with the rmb in the interrupt handler.
- */
- atomic_set(&data->refs, 0); /* convert 3rd to 1st party write */
-
data->csd.func = func;
data->csd.info = info;
-
- /* Ensure 0 refs is visible before mask. Also orders func and info */
- smp_wmb();
-
- /* We rely on the "and" being processed before the store */
cpumask_and(data->cpumask, mask, cpu_online_mask);
cpumask_clear_cpu(this_cpu, data->cpumask);
- refs = cpumask_weight(data->cpumask);
-
- /* Some callers race with other cpus changing the passed mask */
- if (unlikely(!refs)) {
- csd_unlock(&data->csd);
- return;
- }
+ atomic_set(&data->refs, cpumask_weight(data->cpumask));
spin_lock_irqsave(&call_function.lock, flags);
/*
* will not miss any other list entries:
*/
list_add_rcu(&data->csd.list, &call_function.queue);
- /*
- * We rely on the wmb() in list_add_rcu to complete our writes
- * to the cpumask before this write to refs, which indicates
- * data is on the list and is ready to be processed.
- */
- atomic_set(&data->refs, refs);
spin_unlock_irqrestore(&call_function.lock, flags);
/*
wake_up_process(tsk);
}
-/*
- * preempt_count and SOFTIRQ_OFFSET usage:
- * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
- * softirq processing.
- * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
- * on local_bh_disable or local_bh_enable.
- * This lets us distinguish between whether we are currently processing
- * softirq and whether we just have bh disabled.
- */
-
/*
* This one is for softirq.c-internal use,
* where hardirqs are disabled legitimately:
*/
#ifdef CONFIG_TRACE_IRQFLAGS
-static void __local_bh_disable(unsigned long ip, unsigned int cnt)
+static void __local_bh_disable(unsigned long ip)
{
unsigned long flags;
* We must manually increment preempt_count here and manually
* call the trace_preempt_off later.
*/
- preempt_count() += cnt;
+ preempt_count() += SOFTIRQ_OFFSET;
/*
* Were softirqs turned off above:
*/
- if (softirq_count() == cnt)
+ if (softirq_count() == SOFTIRQ_OFFSET)
trace_softirqs_off(ip);
raw_local_irq_restore(flags);
- if (preempt_count() == cnt)
+ if (preempt_count() == SOFTIRQ_OFFSET)
trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
-static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
+static inline void __local_bh_disable(unsigned long ip)
{
- add_preempt_count(cnt);
+ add_preempt_count(SOFTIRQ_OFFSET);
barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */
void local_bh_disable(void)
{
- __local_bh_disable((unsigned long)__builtin_return_address(0),
- SOFTIRQ_DISABLE_OFFSET);
+ __local_bh_disable((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_disable);
-static void __local_bh_enable(unsigned int cnt)
-{
- WARN_ON_ONCE(in_irq());
- WARN_ON_ONCE(!irqs_disabled());
-
- if (softirq_count() == cnt)
- trace_softirqs_on((unsigned long)__builtin_return_address(0));
- sub_preempt_count(cnt);
-}
-
/*
* Special-case - softirqs can safely be enabled in
* cond_resched_softirq(), or by __do_softirq(),
*/
void _local_bh_enable(void)
{
- __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
+ WARN_ON_ONCE(in_irq());
+ WARN_ON_ONCE(!irqs_disabled());
+
+ if (softirq_count() == SOFTIRQ_OFFSET)
+ trace_softirqs_on((unsigned long)__builtin_return_address(0));
+ sub_preempt_count(SOFTIRQ_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);
/*
* Are softirqs going to be turned on now:
*/
- if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
+ if (softirq_count() == SOFTIRQ_OFFSET)
trace_softirqs_on(ip);
/*
* Keep preemption disabled until we are done with
* softirq processing:
*/
- sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);
+ sub_preempt_count(SOFTIRQ_OFFSET - 1);
if (unlikely(!in_interrupt() && local_softirq_pending()))
do_softirq();
pending = local_softirq_pending();
account_system_vtime(current);
- __local_bh_disable((unsigned long)__builtin_return_address(0),
- SOFTIRQ_OFFSET);
+ __local_bh_disable((unsigned long)__builtin_return_address(0));
lockdep_softirq_enter();
cpu = smp_processor_id();
lockdep_softirq_exit();
account_system_vtime(current);
- __local_bh_enable(SOFTIRQ_OFFSET);
+ _local_bh_enable();
}
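
The comments and offsets removed in the hunks above describe splitting the softirq byte of preempt_count between "bh disabled" (SOFTIRQ_DISABLE_OFFSET) and "serving softirq" (SOFTIRQ_OFFSET). A user-space model of that counter layout; the bit positions mirror the kernel's usual softirq byte (bits 8-15) and are assumptions here, not part of the patch:

#include <stdio.h>

#define SOFTIRQ_OFFSET          (1U << 8)
#define SOFTIRQ_DISABLE_OFFSET  (2 * SOFTIRQ_OFFSET)
#define SOFTIRQ_MASK            0x0000ff00U

static unsigned int preempt_count;

#define softirq_count()      (preempt_count & SOFTIRQ_MASK)
#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)

int main(void)
{
	preempt_count += SOFTIRQ_DISABLE_OFFSET;  /* local_bh_disable() */
	printf("bh disabled: serving=%d\n", !!in_serving_softirq());  /* 0 */

	preempt_count += SOFTIRQ_OFFSET;          /* __do_softirq() entry */
	printf("in softirq:  serving=%d\n", !!in_serving_softirq());  /* 1 */
	return 0;
}

With the single-offset scheme restored by this patch the two states are indistinguishable, which is why the cputime accounting hunk earlier falls back to plain softirq_count().
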
#ifndef __ARCH_HAS_DO_SOFTIRQ
rcu_irq_enter();
if (idle_cpu(cpu) && !in_interrupt()) {
- /*
- * Prevent raise_softirq from needlessly waking up ksoftirqd
- * here, as softirq will be serviced on return from interrupt.
- */
- local_bh_disable();
+ __irq_enter();
tick_check_idle(cpu);
- _local_bh_enable();
- }
-
- __irq_enter();
+ } else
+ __irq_enter();
}
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
{
set_current_state(TASK_INTERRUPTIBLE);
- current->flags |= PF_KSOFTIRQD;
while (!kthread_should_stop()) {
preempt_disable();
if (!local_softirq_pending()) {
if (!new_user)
return -EAGAIN;
+ if (!task_can_switch_user(new_user, current)) {
+ free_uid(new_user);
+ return -EINVAL;
+ }
+
if (atomic_read(&new_user->processes) >=
current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
new_user != INIT_USER) {
mutex_lock(&clocksource_mutex);
clocksource_enqueue(cs);
- clocksource_enqueue_watchdog(cs);
clocksource_select();
+ clocksource_enqueue_watchdog(cs);
mutex_unlock(&clocksource_mutex);
return 0;
}
*/
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
- int cpu = smp_processor_id();
-
/* Set it up only once ! */
if (bc->event_handler != tick_handle_oneshot_broadcast) {
int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
+ int cpu = smp_processor_id();
bc->event_handler = tick_handle_oneshot_broadcast;
clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
tick_broadcast_set_event(tick_next_period, 1);
} else
bc->next_event.tv64 = KTIME_MAX;
- } else {
- /*
- * The first cpu which switches to oneshot mode sets
- * the bit for all other cpus which are in the general
- * (periodic) broadcast mask. So the bit is set and
- * would prevent the first broadcast enter after this
- * to program the bc device.
- */
- tick_broadcast_clear_oneshot(cpu);
}
}
return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}
-/*
- * Check whether the broadcast device supports oneshot.
- */
-bool tick_broadcast_oneshot_available(void)
-{
- struct clock_event_device *bc = tick_broadcast_device.evtdev;
-
- return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
-}
-
#endif
{
struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
- if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
- return 0;
- if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
- return 1;
- return tick_broadcast_oneshot_available();
+ return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
}
/*
extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
extern int tick_broadcast_oneshot_active(void);
extern void tick_check_oneshot_broadcast(int cpu);
-bool tick_broadcast_oneshot_available(void);
# else /* BROADCAST */
static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
static inline int tick_broadcast_oneshot_active(void) { return 0; }
static inline void tick_check_oneshot_broadcast(int cpu) { }
-static inline bool tick_broadcast_oneshot_available(void) { return true; }
# endif /* !BROADCAST */
#else /* !ONESHOT */
return 0;
}
static inline int tick_broadcast_oneshot_active(void) { return 0; }
-static inline bool tick_broadcast_oneshot_available(void) { return false; }
#endif /* !TICK_ONESHOT */
/*
static struct timespec xtime_cache __attribute__ ((aligned (16)));
void update_xtime_cache(u64 nsec)
{
- /*
- * Use temporary variable so get_seconds() cannot catch
- * an intermediate xtime_cache.tv_sec value.
- * The ACCESS_ONCE() keeps the compiler from optimizing
- * out the intermediate value.
- */
- struct timespec ts = xtime;
- timespec_add_ns(&ts, nsec);
- ACCESS_ONCE(xtime_cache) = ts;
+ xtime_cache = xtime;
+ timespec_add_ns(&xtime_cache, nsec);
}
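
The comment removed above is about readers catching a half-updated xtime_cache. A user-space sketch of the hazard, with a simplified stand-in for timespec_add_ns(); the field widths and NSEC_PER_SEC are standard, everything else is illustrative:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct ts_model { long long tv_sec; long tv_nsec; };

/* simplified stand-in for the kernel helper */
static void ts_add_ns(struct ts_model *ts, unsigned long long ns)
{
	ns += ts->tv_nsec;
	while (ns >= NSEC_PER_SEC) {
		ns -= NSEC_PER_SEC;
		ts->tv_sec++;
	}
	ts->tv_nsec = ns;
}

int main(void)
{
	struct ts_model xtime = { .tv_sec = 99, .tv_nsec = 999999000 };
	struct ts_model cache;

	/* build the new value in a local and publish it in one store; doing the
	 * carry loop directly on the shared cache briefly exposes a mismatched
	 * tv_sec/tv_nsec pair to a concurrent get_seconds()-style reader */
	cache = xtime;
	ts_add_ns(&cache, 2000);
	printf("%lld.%09ld\n", cache.tv_sec, cache.tv_nsec);  /* 100.000001000 */
	return 0;
}
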
/* must hold xtime_lock */
/* The cpu_boot init_task->ret_stack will never be freed */
for_each_online_cpu(cpu) {
if (!idle_task(cpu)->ret_stack)
- ftrace_graph_init_idle_task(idle_task(cpu), cpu);
+ ftrace_graph_init_task(idle_task(cpu));
}
do {
mutex_unlock(&ftrace_lock);
}
-static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
-
-static void
-graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
-{
- atomic_set(&t->tracing_graph_pause, 0);
- atomic_set(&t->trace_overrun, 0);
- t->ftrace_timestamp = 0;
- /* make curr_ret_stack visible before we add the ret_stack */
- smp_wmb();
- t->ret_stack = ret_stack;
-}
-
-/*
- * Allocate a return stack for the idle task. May be the first
- * time through, or it may be done by CPU hotplug online.
- */
-void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
-{
- t->curr_ret_stack = -1;
- /*
- * The idle task has no parent, it either has its own
- * stack or no stack at all.
- */
- if (t->ret_stack)
- WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
-
- if (ftrace_graph_active) {
- struct ftrace_ret_stack *ret_stack;
-
- ret_stack = per_cpu(idle_ret_stack, cpu);
- if (!ret_stack) {
- ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
- * sizeof(struct ftrace_ret_stack),
- GFP_KERNEL);
- if (!ret_stack)
- return;
- per_cpu(idle_ret_stack, cpu) = ret_stack;
- }
- graph_init_task(t, ret_stack);
- }
-}
-
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
GFP_KERNEL);
if (!ret_stack)
return;
- graph_init_task(t, ret_stack);
+ atomic_set(&t->tracing_graph_pause, 0);
+ atomic_set(&t->trace_overrun, 0);
+ t->ftrace_timestamp = 0;
+ /* make curr_ret_stack visible before we add the ret_stack */
+ smp_wmb();
+ t->ret_stack = ret_stack;
}
}
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>
+#include "cred-internals.h"
struct user_namespace init_user_ns = {
.kref = {
.sigpending = ATOMIC_INIT(0),
.locked_shm = 0,
.user_ns = &init_user_ns,
+#ifdef CONFIG_USER_SCHED
+ .tg = &init_task_group,
+#endif
};
/*
put_user_ns(up->user_ns);
}
+#ifdef CONFIG_USER_SCHED
+
+static void sched_destroy_user(struct user_struct *up)
+{
+ sched_destroy_group(up->tg);
+}
+
+static int sched_create_user(struct user_struct *up)
+{
+ int rc = 0;
+
+ up->tg = sched_create_group(&root_task_group);
+ if (IS_ERR(up->tg))
+ rc = -ENOMEM;
+
+ set_tg_uid(up);
+
+ return rc;
+}
+
+#else /* CONFIG_USER_SCHED */
+
+static void sched_destroy_user(struct user_struct *up) { }
+static int sched_create_user(struct user_struct *up) { return 0; }
+
+#endif /* CONFIG_USER_SCHED */
+
+#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)
+
+static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
+{
+ struct user_struct *user;
+ struct hlist_node *h;
+
+ hlist_for_each_entry(user, h, hashent, uidhash_node) {
+ if (user->uid == uid) {
+ /* possibly resurrect an "almost deleted" object */
+ if (atomic_inc_return(&user->__count) == 1)
+ cancel_delayed_work(&user->work);
+ return user;
+ }
+ }
+
+ return NULL;
+}
+
+static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
+static DEFINE_MUTEX(uids_mutex);
+
+static inline void uids_mutex_lock(void)
+{
+ mutex_lock(&uids_mutex);
+}
+
+static inline void uids_mutex_unlock(void)
+{
+ mutex_unlock(&uids_mutex);
+}
+
+/* uid directory attributes */
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static ssize_t cpu_shares_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct user_struct *up = container_of(kobj, struct user_struct, kobj);
+
+ return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
+}
+
+static ssize_t cpu_shares_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct user_struct *up = container_of(kobj, struct user_struct, kobj);
+ unsigned long shares;
+ int rc;
+
+ sscanf(buf, "%lu", &shares);
+
+ rc = sched_group_set_shares(up->tg, shares);
+
+ return (rc ? rc : size);
+}
+
+static struct kobj_attribute cpu_share_attr =
+ __ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
+#endif
+
+#ifdef CONFIG_RT_GROUP_SCHED
+static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct user_struct *up = container_of(kobj, struct user_struct, kobj);
+
+ return sprintf(buf, "%ld\n", sched_group_rt_runtime(up->tg));
+}
+
+static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct user_struct *up = container_of(kobj, struct user_struct, kobj);
+ unsigned long rt_runtime;
+ int rc;
+
+ sscanf(buf, "%ld", &rt_runtime);
+
+ rc = sched_group_set_rt_runtime(up->tg, rt_runtime);
+
+ return (rc ? rc : size);
+}
+
+static struct kobj_attribute cpu_rt_runtime_attr =
+ __ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);
+
+static ssize_t cpu_rt_period_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct user_struct *up = container_of(kobj, struct user_struct, kobj);
+
+ return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
+}
+
+static ssize_t cpu_rt_period_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct user_struct *up = container_of(kobj, struct user_struct, kobj);
+ unsigned long rt_period;
+ int rc;
+
+ sscanf(buf, "%lu", &rt_period);
+
+ rc = sched_group_set_rt_period(up->tg, rt_period);
+
+ return (rc ? rc : size);
+}
+
+static struct kobj_attribute cpu_rt_period_attr =
+ __ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
+#endif
+
+/* default attributes per uid directory */
+static struct attribute *uids_attributes[] = {
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ &cpu_share_attr.attr,
+#endif
+#ifdef CONFIG_RT_GROUP_SCHED
+ &cpu_rt_runtime_attr.attr,
+ &cpu_rt_period_attr.attr,
+#endif
+ NULL
+};
+
+/* the lifetime of user_struct is not managed by the core (now) */
+static void uids_release(struct kobject *kobj)
+{
+ return;
+}
+
+static struct kobj_type uids_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_attrs = uids_attributes,
+ .release = uids_release,
+};
+
+/*
+ * Create /sys/kernel/uids/<uid>/cpu_share file for this user
+ * We do not create this file for users in a user namespace (until
+ * sysfs tagging is implemented).
+ *
+ * See Documentation/scheduler/sched-design-CFS.txt for ramifications.
+ */
+static int uids_user_create(struct user_struct *up)
+{
+ struct kobject *kobj = &up->kobj;
+ int error;
+
+ memset(kobj, 0, sizeof(struct kobject));
+ if (up->user_ns != &init_user_ns)
+ return 0;
+ kobj->kset = uids_kset;
+ error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
+ if (error) {
+ kobject_put(kobj);
+ goto done;
+ }
+
+ kobject_uevent(kobj, KOBJ_ADD);
+done:
+ return error;
+}
+
+/* create these entries in sysfs:
+ * "/sys/kernel/uids" directory
+ * "/sys/kernel/uids/0" directory (for root user)
+ * "/sys/kernel/uids/0/cpu_share" file (for root user)
+ */
+int __init uids_sysfs_init(void)
+{
+ uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
+ if (!uids_kset)
+ return -ENOMEM;
+
+ return uids_user_create(&root_user);
+}
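
Once the uids kset and the per-uid directories above exist, the CPU share of every task owned by a uid can be tuned from user space. A hypothetical usage sketch; the uid (1000), the new share value, and the "commonly 1024" default are assumptions, only the /sys/kernel/uids/<uid>/cpu_share path comes from the code above:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/uids/1000/cpu_share", "w");

	if (!f) {
		perror("open cpu_share");
		return 1;
	}
	fprintf(f, "%lu\n", 2048UL);  /* double the (commonly 1024) default share */
	fclose(f);
	return 0;
}
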
+
+/* delayed work function to remove sysfs directory for a user and free up
+ * corresponding structures.
+ */
+static void cleanup_user_struct(struct work_struct *w)
+{
+ struct user_struct *up = container_of(w, struct user_struct, work.work);
+ unsigned long flags;
+ int remove_user = 0;
+
+ /* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
+ * atomic.
+ */
+ uids_mutex_lock();
+
+ spin_lock_irqsave(&uidhash_lock, flags);
+ if (atomic_read(&up->__count) == 0) {
+ uid_hash_remove(up);
+ remove_user = 1;
+ }
+ spin_unlock_irqrestore(&uidhash_lock, flags);
+
+ if (!remove_user)
+ goto done;
+
+ if (up->user_ns == &init_user_ns) {
+ kobject_uevent(&up->kobj, KOBJ_REMOVE);
+ kobject_del(&up->kobj);
+ kobject_put(&up->kobj);
+ }
+
+ sched_destroy_user(up);
+ key_put(up->uid_keyring);
+ key_put(up->session_keyring);
+ kmem_cache_free(uid_cachep, up);
+
+done:
+ uids_mutex_unlock();
+}
+
+/* IRQs are disabled and uidhash_lock is held upon function entry.
+ * IRQ state (as stored in flags) is restored and uidhash_lock released
+ * upon function exit.
+ */
+static void free_user(struct user_struct *up, unsigned long flags)
+{
+ INIT_DELAYED_WORK(&up->work, cleanup_user_struct);
+ schedule_delayed_work(&up->work, msecs_to_jiffies(1000));
+ spin_unlock_irqrestore(&uidhash_lock, flags);
+}
+
+#else /* CONFIG_USER_SCHED && CONFIG_SYSFS */
+
static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
struct user_struct *user;
return NULL;
}
+int uids_sysfs_init(void) { return 0; }
+static inline int uids_user_create(struct user_struct *up) { return 0; }
+static inline void uids_mutex_lock(void) { }
+static inline void uids_mutex_unlock(void) { }
+
/* IRQs are disabled and uidhash_lock is held upon function entry.
* IRQ state (as stored in flags) is restored and uidhash_lock released
* upon function exit.
*/
static void free_user(struct user_struct *up, unsigned long flags)
- __releases(&uidhash_lock)
{
uid_hash_remove(up);
spin_unlock_irqrestore(&uidhash_lock, flags);
+ sched_destroy_user(up);
key_put(up->uid_keyring);
key_put(up->session_keyring);
kmem_cache_free(uid_cachep, up);
}
+#endif
+
+#if defined(CONFIG_RT_GROUP_SCHED) && defined(CONFIG_USER_SCHED)
+/*
+ * We need to check if a setuid can take place. This function should be called
+ * before successfully completing the setuid.
+ */
+int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
+{
+
+ return sched_rt_can_attach(up->tg, tsk);
+
+}
+#else
+int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
+{
+ return 1;
+}
+#endif
+
/*
* Locate the user_struct for the passed UID. If found, take a ref on it. The
* caller must undo that ref with free_uid().
struct hlist_head *hashent = uidhashentry(ns, uid);
struct user_struct *up, *new;
- /* Make uid_hash_find() + uid_hash_insert() atomic. */
+ /* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
+ * atomic.
+ */
+ uids_mutex_lock();
+
spin_lock_irq(&uidhash_lock);
up = uid_hash_find(uid, hashent);
spin_unlock_irq(&uidhash_lock);
new->uid = uid;
atomic_set(&new->__count, 1);
+ if (sched_create_user(new) < 0)
+ goto out_free_user;
+
new->user_ns = get_user_ns(ns);
+ if (uids_user_create(new))
+ goto out_destoy_sched;
+
/*
* Before adding this, check whether we raced
* on adding the same user already..
spin_lock_irq(&uidhash_lock);
up = uid_hash_find(uid, hashent);
if (up) {
+ /* This case is not possible when CONFIG_USER_SCHED
+ * is defined, since we serialize alloc_uid() using
+ * uids_mutex. Hence no need to call
+ * sched_destroy_user() or remove_user_sysfs_dir().
+ */
key_put(new->uid_keyring);
key_put(new->session_keyring);
kmem_cache_free(uid_cachep, new);
spin_unlock_irq(&uidhash_lock);
}
+ uids_mutex_unlock();
+
return up;
+out_destoy_sched:
+ sched_destroy_user(new);
+ put_user_ns(new->user_ns);
+out_free_user:
+ kmem_cache_free(uid_cachep, new);
out_unlock:
+ uids_mutex_unlock();
return NULL;
}
*/
mapping = vma->vm_file->f_mapping;
spin_lock(&mapping->i_mmap_lock);
- new_vma->vm_truncate_count = 0;
+ if (new_vma->vm_truncate_count &&
+ new_vma->vm_truncate_count != vma->vm_truncate_count)
+ new_vma->vm_truncate_count = 0;
}
/*
if (old_len > vma->vm_end - addr)
goto Efault;
- /* Need to be careful about a growing mapping */
- if (new_len > old_len) {
- unsigned long pgoff;
-
- if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
+ if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
+ if (new_len > old_len)
goto Efault;
- pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
- pgoff += vma->vm_pgoff;
- if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
- goto Einval;
}
if (vma->vm_flags & VM_LOCKED) {
file = shmem_file_setup("dev/zero", size, vma->vm_flags);
if (IS_ERR(file))
return PTR_ERR(file);
-
- if (vma->vm_file)
- fput(vma->vm_file);
- vma->vm_file = file;
- vma->vm_ops = &shmem_vm_ops;
- vma->vm_flags |= VM_CAN_NONLINEAR;
+ shmem_set_file(vma, file);
return 0;
}
ax25_cb *ax25;
int err = 0;
- memset(fsa, 0, sizeof(*fsa));
lock_sock(sk);
ax25 = ax25_sk(sk);
fsa->fsa_ax25.sax25_family = AF_AX25;
fsa->fsa_ax25.sax25_call = ax25->dest_addr;
+ fsa->fsa_ax25.sax25_ndigis = 0;
if (ax25->digipeat != NULL) {
ndigi = ax25->digipeat->ndigi;
sockfd_put(nsock);
return -EBADFD;
}
- ca.device[sizeof(ca.device)-1] = 0;
err = bnep_add_connection(&ca, nsock);
if (!err) {
break;
}
- memset(&cinfo, 0, sizeof(cinfo));
cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
return -ENOMEM;
- tmp.name[sizeof(tmp.name) - 1] = 0;
-
countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
newinfo = vmalloc(sizeof(*newinfo) + countersize);
if (!newinfo)
struct list_head tx_ops;
unsigned long dropped_usr_msgs;
struct proc_dir_entry *bcm_proc_read;
- char procname [32]; /* inode number in decimal with \0 */
+ char procname [20]; /* pointer printed in ASCII with \0 */
};
static inline struct bcm_sock *bcm_sk(const struct sock *sk)
static int bcm_release(struct socket *sock)
{
struct sock *sk = sock->sk;
- struct bcm_sock *bo;
+ struct bcm_sock *bo = bcm_sk(sk);
struct bcm_op *op, *next;
- if (sk == NULL)
- return 0;
-
- bo = bcm_sk(sk);
-
/* remove bcm_ops, timer, rx_unregister(), etc. */
unregister_netdevice_notifier(&bo->notifier);
if (proc_dir) {
/* unique socket address as filename */
- sprintf(bo->procname, "%lu", sock_i_ino(sk));
+ sprintf(bo->procname, "%p", sock);
bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
proc_dir,
&bcm_proc_fops, sk);
static int raw_release(struct socket *sock)
{
struct sock *sk = sock->sk;
- struct raw_sock *ro;
-
- if (!sk)
- return 0;
-
- ro = raw_sk(sk);
+ struct raw_sock *ro = raw_sk(sk);
unregister_netdevice_notifier(&ro->notifier);
void dev_load(struct net *net, const char *name)
{
struct net_device *dev;
- int no_module;
read_lock(&dev_base_lock);
dev = __dev_get_by_name(net, name);
read_unlock(&dev_base_lock);
- no_module = !dev;
- if (no_module && capable(CAP_NET_ADMIN))
- no_module = request_module("netdev-%s", name);
- if (no_module && capable(CAP_SYS_MODULE)) {
- if (!request_module("%s", name))
- pr_err("Loading kernel module for a network device "
-"with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
-"instead\n", name);
- }
+ if (!dev && capable(CAP_NET_ADMIN))
+ request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);
{
__skb_pull(skb, skb_headlen(skb));
skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
- skb->dev = napi->dev;
- skb->iif = 0;
napi->skb = skb;
}
*/
unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
{
+ struct sock_filter *fentry; /* We walk down these */
void *ptr;
u32 A = 0; /* Accumulator */
u32 X = 0; /* Index Register */
u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */
- unsigned long memvalid = 0;
u32 tmp;
int k;
int pc;
- BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG);
/*
* Process array of filter instructions.
*/
for (pc = 0; pc < flen; pc++) {
- const struct sock_filter *fentry = &filter[pc];
- u32 f_k = fentry->k;
+ fentry = &filter[pc];
switch (fentry->code) {
case BPF_ALU|BPF_ADD|BPF_X:
A += X;
continue;
case BPF_ALU|BPF_ADD|BPF_K:
- A += f_k;
+ A += fentry->k;
continue;
case BPF_ALU|BPF_SUB|BPF_X:
A -= X;
continue;
case BPF_ALU|BPF_SUB|BPF_K:
- A -= f_k;
+ A -= fentry->k;
continue;
case BPF_ALU|BPF_MUL|BPF_X:
A *= X;
continue;
case BPF_ALU|BPF_MUL|BPF_K:
- A *= f_k;
+ A *= fentry->k;
continue;
case BPF_ALU|BPF_DIV|BPF_X:
if (X == 0)
A /= X;
continue;
case BPF_ALU|BPF_DIV|BPF_K:
- A /= f_k;
+ A /= fentry->k;
continue;
case BPF_ALU|BPF_AND|BPF_X:
A &= X;
continue;
case BPF_ALU|BPF_AND|BPF_K:
- A &= f_k;
+ A &= fentry->k;
continue;
case BPF_ALU|BPF_OR|BPF_X:
A |= X;
continue;
case BPF_ALU|BPF_OR|BPF_K:
- A |= f_k;
+ A |= fentry->k;
continue;
case BPF_ALU|BPF_LSH|BPF_X:
A <<= X;
continue;
case BPF_ALU|BPF_LSH|BPF_K:
- A <<= f_k;
+ A <<= fentry->k;
continue;
case BPF_ALU|BPF_RSH|BPF_X:
A >>= X;
continue;
case BPF_ALU|BPF_RSH|BPF_K:
- A >>= f_k;
+ A >>= fentry->k;
continue;
case BPF_ALU|BPF_NEG:
A = -A;
continue;
case BPF_JMP|BPF_JA:
- pc += f_k;
+ pc += fentry->k;
continue;
case BPF_JMP|BPF_JGT|BPF_K:
- pc += (A > f_k) ? fentry->jt : fentry->jf;
+ pc += (A > fentry->k) ? fentry->jt : fentry->jf;
continue;
case BPF_JMP|BPF_JGE|BPF_K:
- pc += (A >= f_k) ? fentry->jt : fentry->jf;
+ pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
continue;
case BPF_JMP|BPF_JEQ|BPF_K:
- pc += (A == f_k) ? fentry->jt : fentry->jf;
+ pc += (A == fentry->k) ? fentry->jt : fentry->jf;
continue;
case BPF_JMP|BPF_JSET|BPF_K:
- pc += (A & f_k) ? fentry->jt : fentry->jf;
+ pc += (A & fentry->k) ? fentry->jt : fentry->jf;
continue;
case BPF_JMP|BPF_JGT|BPF_X:
pc += (A > X) ? fentry->jt : fentry->jf;
pc += (A & X) ? fentry->jt : fentry->jf;
continue;
case BPF_LD|BPF_W|BPF_ABS:
- k = f_k;
+ k = fentry->k;
load_w:
ptr = load_pointer(skb, k, 4, &tmp);
if (ptr != NULL) {
}
break;
case BPF_LD|BPF_H|BPF_ABS:
- k = f_k;
+ k = fentry->k;
load_h:
ptr = load_pointer(skb, k, 2, &tmp);
if (ptr != NULL) {
}
break;
case BPF_LD|BPF_B|BPF_ABS:
- k = f_k;
+ k = fentry->k;
load_b:
ptr = load_pointer(skb, k, 1, &tmp);
if (ptr != NULL) {
X = skb->len;
continue;
case BPF_LD|BPF_W|BPF_IND:
- k = X + f_k;
+ k = X + fentry->k;
goto load_w;
case BPF_LD|BPF_H|BPF_IND:
- k = X + f_k;
+ k = X + fentry->k;
goto load_h;
case BPF_LD|BPF_B|BPF_IND:
- k = X + f_k;
+ k = X + fentry->k;
goto load_b;
case BPF_LDX|BPF_B|BPF_MSH:
- ptr = load_pointer(skb, f_k, 1, &tmp);
+ ptr = load_pointer(skb, fentry->k, 1, &tmp);
if (ptr != NULL) {
X = (*(u8 *)ptr & 0xf) << 2;
continue;
}
return 0;
case BPF_LD|BPF_IMM:
- A = f_k;
+ A = fentry->k;
continue;
case BPF_LDX|BPF_IMM:
- X = f_k;
+ X = fentry->k;
continue;
case BPF_LD|BPF_MEM:
- A = (memvalid & (1UL << f_k)) ?
- mem[f_k] : 0;
+ A = mem[fentry->k];
continue;
case BPF_LDX|BPF_MEM:
- X = (memvalid & (1UL << f_k)) ?
- mem[f_k] : 0;
+ X = mem[fentry->k];
continue;
case BPF_MISC|BPF_TAX:
X = A;
A = X;
continue;
case BPF_RET|BPF_K:
- return f_k;
+ return fentry->k;
case BPF_RET|BPF_A:
return A;
case BPF_ST:
- memvalid |= 1UL << f_k;
- mem[f_k] = A;
+ mem[fentry->k] = A;
continue;
case BPF_STX:
- memvalid |= 1UL << f_k;
- mem[f_k] = X;
+ mem[fentry->k] = X;
continue;
default:
WARN_ON(1);
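
The interpreter hunk above reads filter constants and the mem[] scratch slots directly per instruction. A small sketch of a classic-BPF program that exercises the BPF_ST / BPF_LD|BPF_MEM scratch path and attaches it to a socket; the socket type and the "first word of the packet" choice are arbitrary, while the opcode macros and structures come from <linux/filter.h>:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/filter.h>

#ifndef SO_ATTACH_FILTER
#define SO_ATTACH_FILTER 26   /* value from <asm/socket.h> on most architectures */
#endif

int main(void)
{
	struct sock_filter prog[] = {
		BPF_STMT(BPF_LD  | BPF_W | BPF_ABS, 0), /* A = first word of packet */
		BPF_STMT(BPF_ST, 0),                    /* mem[0] = A */
		BPF_STMT(BPF_LD  | BPF_IMM, 0),         /* A = 0 */
		BPF_STMT(BPF_LD  | BPF_MEM, 0),         /* A = mem[0], valid: slot stored above */
		BPF_STMT(BPF_RET | BPF_A, 0),           /* accept A bytes */
	};
	struct sock_fprog fprog = {
		.len = sizeof(prog) / sizeof(prog[0]),
		.filter = prog,
	};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
				 &fprog, sizeof(fprog)) < 0)
		perror("attach filter");
	return 0;
}
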
/* Caller (dccp_v4_do_rcv) will send Reset */
dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
return 1;
- } else if (sk->sk_state == DCCP_CLOSED) {
- dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
- return 1;
}
if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) {
}
switch (sk->sk_state) {
+ case DCCP_CLOSED:
+ dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
+ return 1;
+
case DCCP_REQUESTING:
queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
if (queued >= 0)
case DCCPO_CHANGE_L ... DCCPO_CONFIRM_R:
if (pkt_type == DCCP_PKT_DATA) /* RFC 4340, 6 */
break;
- if (len == 0)
- goto out_invalid_option;
rc = dccp_feat_parse_options(sk, dreq, mandatory, opt,
*value, value + 1, len - 1);
if (rc)
#include <linux/wireless.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
-#include <linux/vmalloc.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <linux/stat.h>
#endif
#ifdef CONFIG_ECONET_AUNUDP
struct msghdr udpmsg;
- struct iovec iov[2];
+ struct iovec iov[msg->msg_iovlen+1];
struct aunhdr ah;
struct sockaddr_in udpdest;
__kernel_size_t size;
+ int i;
mm_segment_t oldfs;
- char *userbuf;
#endif
/*
}
}
+ if (len + 15 > dev->mtu) {
+ mutex_unlock(&econet_mutex);
+ return -EMSGSIZE;
+ }
+
if (dev->type == ARPHRD_ECONET) {
/* Real hardware Econet. We're not worthy etc. */
#ifdef CONFIG_ECONET_NATIVE
unsigned short proto = 0;
int res;
- if (len + 15 > dev->mtu) {
- mutex_unlock(&econet_mutex);
- return -EMSGSIZE;
- }
-
dev_hold(dev);
skb = sock_alloc_send_skb(sk, len+LL_ALLOCATED_SPACE(dev),
return -ENETDOWN; /* No socket - can't send */
}
- if (len > 32768) {
- err = -E2BIG;
- goto error;
- }
-
/* Make up a UDP datagram and hand it off to some higher intellect. */
memset(&udpdest, 0, sizeof(udpdest));
udpdest.sin_addr.s_addr = htonl(network | addr.station);
}
- memset(&ah, 0, sizeof(ah));
ah.port = port;
ah.cb = cb & 0x7f;
ah.code = 2; /* magic */
+ ah.pad = 0;
/* tack our header on the front of the iovec */
size = sizeof(struct aunhdr);
+ /*
+ * XXX: that is b0rken. We can't mix userland and kernel pointers
+ * in iovec, since on a lot of platforms copy_from_user() will
+ * *not* work with the kernel and userland ones at the same time,
+ * regardless of what we do with set_fs(). And we are talking about
+ * econet-over-ethernet here, so "it's only ARM anyway" doesn't
+ * apply. Any suggestions on fixing that code? -- AV
+ */
iov[0].iov_base = (void *)&ah;
iov[0].iov_len = size;
-
- userbuf = vmalloc(len);
- if (userbuf == NULL) {
- err = -ENOMEM;
- goto error;
+ for (i = 0; i < msg->msg_iovlen; i++) {
+ void __user *base = msg->msg_iov[i].iov_base;
+ size_t len = msg->msg_iov[i].iov_len;
+ /* Check it now since we switch to KERNEL_DS later. */
+ if (!access_ok(VERIFY_READ, base, len)) {
+ mutex_unlock(&econet_mutex);
+ return -EFAULT;
+ }
+ iov[i+1].iov_base = base;
+ iov[i+1].iov_len = len;
+ size += len;
}
- iov[1].iov_base = userbuf;
- iov[1].iov_len = len;
- err = memcpy_fromiovec(userbuf, msg->msg_iov, len);
- if (err)
- goto error_free_buf;
-
/* Get a skbuff (no data, just holds our cb information) */
if ((skb = sock_alloc_send_skb(sk, 0,
msg->msg_flags & MSG_DONTWAIT,
- &err)) == NULL)
- goto error_free_buf;
+ &err)) == NULL) {
+ mutex_unlock(&econet_mutex);
+ return err;
+ }
eb = (struct ec_cb *)&skb->cb;
udpmsg.msg_name = (void *)&udpdest;
udpmsg.msg_namelen = sizeof(udpdest);
udpmsg.msg_iov = &iov[0];
- udpmsg.msg_iovlen = 2;
+ udpmsg.msg_iovlen = msg->msg_iovlen + 1;
udpmsg.msg_control = NULL;
udpmsg.msg_controllen = 0;
udpmsg.msg_flags=0;
oldfs = get_fs(); set_fs(KERNEL_DS); /* More privs :-) */
err = sock_sendmsg(udpsock, &udpmsg, size);
set_fs(oldfs);
-
-error_free_buf:
- vfree(userbuf);
#else
err = -EPROTOTYPE;
#endif
- error:
mutex_unlock(&econet_mutex);
return err;
{
struct iphdr *ip = ip_hdr(skb);
unsigned char stn = ntohl(ip->saddr) & 0xff;
- struct dst_entry *dst = skb_dst(skb);
- struct ec_device *edev = NULL;
struct sock *sk;
struct sk_buff *newskb;
-
- if (dst)
- edev = dst->dev->ec_ptr;
+ struct ec_device *edev = skb->dev->ec_ptr;
if (! edev)
goto bad;
return mtu >= 68;
}
-static void inetdev_send_gratuitous_arp(struct net_device *dev,
- struct in_device *in_dev)
-
-{
- struct in_ifaddr *ifa = in_dev->ifa_list;
-
- if (!ifa)
- return;
-
- arp_send(ARPOP_REQUEST, ETH_P_ARP,
- ifa->ifa_address, dev,
- ifa->ifa_address, NULL,
- dev->dev_addr, NULL);
-}
-
/* Called only under RTNL semaphore */
static int inetdev_event(struct notifier_block *this, unsigned long event,
}
ip_mc_up(in_dev);
/* fall through */
- case NETDEV_CHANGEADDR:
- if (!IN_DEV_ARP_NOTIFY(in_dev))
- break;
- /* fall through */
case NETDEV_NOTIFY_PEERS:
+ case NETDEV_CHANGEADDR:
/* Send gratuitous ARP to notify of link change */
- inetdev_send_gratuitous_arp(dev, in_dev);
+ if (IN_DEV_ARP_NOTIFY(in_dev)) {
+ struct in_ifaddr *ifa = in_dev->ifa_list;
+
+ if (ifa)
+ arp_send(ARPOP_REQUEST, ETH_P_ARP,
+ ifa->ifa_address, dev,
+ ifa->ifa_address, NULL,
+ dev->dev_addr, NULL);
+ }
break;
case NETDEV_DOWN:
ip_mc_down(in_dev);
{
struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
- if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
+ if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
struct inet_diag_entry entry;
- const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
- sizeof(*r),
- INET_DIAG_REQ_BYTECODE);
+ struct rtattr *bc = (struct rtattr *)(r + 1);
struct inet_sock *inet = inet_sk(sk);
entry.family = sk->sk_family;
entry.dport = ntohs(inet->dport);
entry.userlocks = sk->sk_userlocks;
- if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
+ if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
return 0;
}
{
struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
- if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
+ if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
struct inet_diag_entry entry;
- const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
- sizeof(*r),
- INET_DIAG_REQ_BYTECODE);
+ struct rtattr *bc = (struct rtattr *)(r + 1);
entry.family = tw->tw_family;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
entry.dport = ntohs(tw->tw_dport);
entry.userlocks = 0;
- if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
+ if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
return 0;
}
struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
struct inet_connection_sock *icsk = inet_csk(sk);
struct listen_sock *lopt;
- const struct nlattr *bc = NULL;
+ struct rtattr *bc = NULL;
struct inet_sock *inet = inet_sk(sk);
int j, s_j;
int reqnum, s_reqnum;
if (!lopt || !lopt->qlen)
goto out;
- if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
- bc = nlmsg_find_attr(cb->nlh, sizeof(*r),
- INET_DIAG_REQ_BYTECODE);
+ if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
+ bc = (struct rtattr *)(r + 1);
entry.sport = inet->num;
entry.userlocks = sk->sk_userlocks;
}
&ireq->rmt_addr;
entry.dport = ntohs(ireq->rmt_port);
- if (!inet_diag_bc_run(nla_data(bc),
- nla_len(bc), &entry))
+ if (!inet_diag_bc_run(RTA_DATA(bc),
+ RTA_PAYLOAD(bc), &entry))
continue;
}
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
-MODULE_ALIAS_NETDEV("gre0");
!exthdrlen)
csummode = CHECKSUM_PARTIAL;
- skb = skb_peek_tail(&sk->sk_write_queue);
-
inet->cork.length += length;
- if (((length > mtu) || (skb && skb_is_gso(skb))) &&
+ if (((length> mtu) || !skb_queue_empty(&sk->sk_write_queue)) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->u.dst.dev->features & NETIF_F_UFO)) {
err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
* adding appropriate IP header.
*/
- if (!skb)
+ if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
goto alloc_new_skb;
while (length > 0) {
return -EINVAL;
inet->cork.length += size;
- if ((size + skb->len > mtu) &&
- (sk->sk_protocol == IPPROTO_UDP) &&
+ if ((sk->sk_protocol == IPPROTO_UDP) &&
(rt->u.dst.dev->features & NETIF_F_UFO)) {
skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
module_init(ipip_init);
module_exit(ipip_fini);
MODULE_LICENSE("GPL");
-MODULE_ALIAS_NETDEV("tunl0");
/* overflow check */
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
- tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
if (!newinfo)
return -ENOMEM;
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
- tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
if (!newinfo)
ret = -EFAULT;
break;
}
- rev.name[sizeof(rev.name)-1] = 0;
try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name,
rev.revision, 1, &ret),
/* overflow check */
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
- tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
if (!newinfo)
return -ENOMEM;
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
- tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
if (!newinfo)
ret = -EFAULT;
break;
}
- rev.name[sizeof(rev.name)-1] = 0;
if (cmd == IPT_SO_GET_REVISION_TARGET)
target = 1;
struct clusterip_config *c = pde->data;
unsigned long nodenum;
- if (size > PROC_WRITELEN)
- return -EIO;
- if (copy_from_user(buffer, input, size))
+ if (copy_from_user(buffer, input, PROC_WRITELEN))
return -EFAULT;
- buffer[size] = 0;
if (*buffer == '+') {
nodenum = simple_strtoul(buffer+1, NULL, 10);
/* Values greater than interface MTU won't take effect. However
* at the point when this call is done we typically don't yet
* know which interface is going to be used */
- if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
+ if (val < 8 || val > MAX_TCP_WINDOW) {
err = -EINVAL;
break;
}
dev->type == ARPHRD_TUNNEL6 ||
dev->type == ARPHRD_SIT ||
dev->type == ARPHRD_NONE) {
+ printk(KERN_INFO
+ "%s: Disabled Privacy Extensions\n",
+ dev->name);
ndev->cnf.use_tempaddr = -1;
} else {
in6_dev_hold(ndev);
MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6 tunneling device");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_NETDEV("ip6tnl0");
#define IPV6_TLV_TEL_DST_SIZE 8
/* overflow check */
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
- tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
if (!newinfo)
return -ENOMEM;
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
- tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
if (!newinfo)
ret = -EFAULT;
break;
}
- rev.name[sizeof(rev.name)-1] = 0;
if (cmd == IP6T_SO_GET_REVISION_TARGET)
target = 1;
module_init(sit_init);
module_exit(sit_cleanup);
MODULE_LICENSE("GPL");
-MODULE_ALIAS_NETDEV("sit0");
+MODULE_ALIAS("sit0");
switch (optname) {
case IRLMP_ENUMDEVICES:
-
- /* Offset to first device entry */
- offset = sizeof(struct irda_device_list) -
- sizeof(struct irda_device_info);
-
- if (len < offset)
- return -EINVAL;
-
/* Ask lmp for the current discovery log */
discoveries = irlmp_get_discoveries(&list.len, self->mask.word,
self->nslots);
err = 0;
/* Write total list length back to client */
- if (copy_to_user(optval, &list, offset))
+ if (copy_to_user(optval, &list,
+ sizeof(struct irda_device_list) -
+ sizeof(struct irda_device_info)))
err = -EFAULT;
+ /* Offset to first device entry */
+ offset = sizeof(struct irda_device_list) -
+ sizeof(struct irda_device_info);
+
/* Copy the list itself - watch for overflow */
if(list.len > 2048)
{
n = 1;
name_len = fp[n++];
-
- IRDA_ASSERT(name_len < IAS_MAX_CLASSNAME + 1, return;);
-
memcpy(name, fp+n, name_len); n+=name_len;
name[name_len] = '\0';
attr_len = fp[n++];
-
- IRDA_ASSERT(attr_len < IAS_MAX_ATTRIBNAME + 1, return;);
-
memcpy(attr, fp+n, attr_len); n+=attr_len;
attr[attr_len] = '\0';
while(isspace(start[length - 1]))
length--;
- DABORT(length < 5 || length > NICKNAME_MAX_LEN + 5,
- -EINVAL, CTRL_ERROR, "Invalid nickname.\n");
-
/* Copy the name for later reuse */
memcpy(ap->rname, start + 5, length - 5);
ap->rname[length - 5] = '\0';
* and we need some headroom for passing the frame to monitor
* interfaces, but never both at the same time.
*/
- BUILD_BUG_ON(IEEE80211_TX_STATUS_HEADROOM !=
- sizeof(struct ieee80211_tx_status_rtap_hdr));
local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom,
sizeof(struct ieee80211_tx_status_rtap_hdr));
memcpy(sta->sta.addr, addr, ETH_ALEN);
sta->local = local;
sta->sdata = sdata;
- sta->last_rx = jiffies;
sta->rate_ctrl = rate_control_get(local->rate_ctrl);
sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl,
int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger)
{
- if (pf >= ARRAY_SIZE(nf_loggers))
- return -EINVAL;
mutex_lock(&nf_log_mutex);
if (__find_logger(pf, logger->name) == NULL) {
mutex_unlock(&nf_log_mutex);
void nf_log_unbind_pf(u_int8_t pf)
{
- if (pf >= ARRAY_SIZE(nf_loggers))
- return;
mutex_lock(&nf_log_mutex);
rcu_assign_pointer(nf_loggers[pf], NULL);
mutex_unlock(&nf_log_mutex);
uaddr->sa_family = AF_PACKET;
dev = dev_get_by_index(sock_net(sk), pkt_sk(sk)->ifindex);
if (dev) {
- strncpy(uaddr->sa_data, dev->name, 14);
+ strlcpy(uaddr->sa_data, dev->name, 15);
dev_put(dev);
} else
memset(uaddr->sa_data, 0, 14);
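
The two variants above differ in termination semantics, which is why their size arguments differ (14 vs 15): strncpy() never writes a terminator when the source fills the buffer, while strlcpy() always does. A user-space sketch, with a simplified local strlcpy() since glibc does not provide one (the helper name is made up):

#include <stdio.h>
#include <string.h>

/* simplified stand-in for the kernel's strlcpy(): always NUL-terminates */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t copy = len >= size ? size - 1 : len;

		memcpy(dst, src, copy);
		dst[copy] = '\0';
	}
	return len;
}

int main(void)
{
	char a[15], b[15];
	const char *name = "averylongifname0";  /* 16 chars, longer than 14 */

	strncpy(a, name, 14);     /* 14 bytes copied, no terminator added */
	my_strlcpy(b, name, 15);  /* 14 bytes copied, then '\0' */

	printf("strncpy terminated: %s\n", memchr(a, '\0', 14) ? "yes" : "no");
	printf("strlcpy terminated: %s\n", memchr(b, '\0', 15) ? "yes" : "no");
	return 0;
}
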
sll->sll_family = AF_PACKET;
sll->sll_ifindex = po->ifindex;
sll->sll_protocol = po->num;
- sll->sll_pkttype = 0;
dev = dev_get_by_index(sock_net(sk), po->ifindex);
if (dev) {
sll->sll_hatype = dev->type;
{
struct net_device *dev = arg;
- if (!net_eq(dev_net(dev), &init_net))
- return 0;
-
switch (what) {
case NETDEV_REGISTER:
if (dev->type == ARPHRD_PHONET)
max_pages = max(nr, max_pages);
nr_pages += nr;
-
- /*
- * nr for one entry in limited to (UINT_MAX>>PAGE_SHIFT)+1
- * so nr_pages cannot overflow without becoming bigger than
- * INT_MAX first. If nr cannot overflow then max_pages should
- * be ok.
- */
- if (nr_pages > INT_MAX) {
- ret = -EINVAL;
- goto out;
- }
}
pages = kcalloc(max_pages, sizeof(struct page *), GFP_KERNEL);
facilities->source_ndigis = 0;
facilities->dest_ndigis = 0;
for (pt = p + 2, lg = 0 ; lg < l ; pt += AX25_ADDR_LEN, lg += AX25_ADDR_LEN) {
- if (pt[6] & AX25_HBIT) {
- if (facilities->dest_ndigis >= ROSE_MAX_DIGIS)
- return -1;
+ if (pt[6] & AX25_HBIT)
memcpy(&facilities->dest_digis[facilities->dest_ndigis++], pt, AX25_ADDR_LEN);
- } else {
- if (facilities->source_ndigis >= ROSE_MAX_DIGIS)
- return -1;
+ else
memcpy(&facilities->source_digis[facilities->source_ndigis++], pt, AX25_ADDR_LEN);
- }
}
}
p += l + 2;
case 0xC0:
l = p[1];
-
- /* Prevent overflows*/
- if (l < 10 || l > 20)
- return -1;
-
if (*p == FAC_CCITT_DEST_NSAP) {
memcpy(&facilities->source_addr, p + 7, ROSE_ADDR_LEN);
memcpy(callsign, p + 12, l - 10);
switch (*p) {
case FAC_NATIONAL: /* National */
len = rose_parse_national(p + 1, facilities, facilities_len - 1);
- if (len < 0)
- return 0;
facilities_len -= len + 1;
p += len + 1;
break;
case FAC_CCITT: /* CCITT */
len = rose_parse_ccitt(p + 1, facilities, facilities_len - 1);
- if (len < 0)
- return 0;
facilities_len -= len + 1;
p += len + 1;
break;
* calls by looking at the number of nested bh disable calls because
 * softirqs always disable bh.
*/
- if (in_serving_softirq())
+ if (softirq_count() != SOFTIRQ_OFFSET)
return -1;
rcu_read_lock();
id = ntohs(hmacs->hmac_ids[i]);
/* Check the id is in the supported range */
- if (id > SCTP_AUTH_HMAC_ID_MAX) {
- id = 0;
+ if (id > SCTP_AUTH_HMAC_ID_MAX)
continue;
- }
/* See if we support the id. Supported IDs have name and
* length fields set, so that we can allocate and use
* them. We can safely just check for name, for without the
* name, we can't allocate the TFM.
*/
- if (!sctp_hmac_list[id].hmac_name) {
- id = 0;
+ if (!sctp_hmac_list[id].hmac_name)
continue;
- }
break;
}
sp = sctp_sk(asoc->base.sk);
num_types = sp->pf->supported_addrs(sp, types);
- chunksize = sizeof(init) + addrs_len;
- chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types));
+ chunksize = sizeof(init) + addrs_len + SCTP_SAT_LEN(num_types);
chunksize += sizeof(ecap_param);
if (sctp_prsctp_enable)
/* Add HMACS parameter length if any were defined */
auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
if (auth_hmacs->length)
- chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
+ chunksize += ntohs(auth_hmacs->length);
else
auth_hmacs = NULL;
/* Add CHUNKS parameter length */
auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
if (auth_chunks->length)
- chunksize += WORD_ROUND(ntohs(auth_chunks->length));
+ chunksize += ntohs(auth_chunks->length);
else
auth_chunks = NULL;
/* If we have any extensions to report, account for that */
if (num_ext)
- chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
- num_ext);
+ chunksize += sizeof(sctp_supported_ext_param_t) + num_ext;
/* RFC 2960 3.3.2 Initiation (INIT) (1)
*
auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
if (auth_hmacs->length)
- chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
+ chunksize += ntohs(auth_hmacs->length);
else
auth_hmacs = NULL;
auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
if (auth_chunks->length)
- chunksize += WORD_ROUND(ntohs(auth_chunks->length));
+ chunksize += ntohs(auth_chunks->length);
else
auth_chunks = NULL;
}
if (num_ext)
- chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
- num_ext);
+ chunksize += sizeof(sctp_supported_ext_param_t) + num_ext;
/* Now allocate and fill out the chunk. */
retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize);
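
The WORD_ROUND() calls removed in the hunks above pad each variable-length parameter to a 4-byte boundary before it is counted toward the INIT/INIT-ACK chunk size. A minimal sketch of that padding, assuming the usual SCTP definition of the macro:

/* Sketch of 4-byte parameter padding; this WORD_ROUND mirrors the usual
 * SCTP definition and is not copied from the hunks above. */
#include <stdio.h>

#define WORD_ROUND(s) (((s) + 3) & ~3)

int main(void)
{
	unsigned int len;

	for (len = 4; len <= 8; len++)
		printf("param length %u -> padded to %u bytes\n",
		       len, WORD_ROUND(len));
	return 0;
}
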
sctp_chunk_free(asconf);
asoc->addip_last_asconf = NULL;
+ /* Send the next asconf chunk from the addip chunk queue. */
+ if (!list_empty(&asoc->addip_chunk_list)) {
+ struct list_head *entry = asoc->addip_chunk_list.next;
+ asconf = list_entry(entry, struct sctp_chunk, list);
+
+ list_del_init(entry);
+
+ /* Hold the chunk until an ASCONF_ACK is received. */
+ sctp_chunk_hold(asconf);
+ if (sctp_primitive_ASCONF(asoc, asconf))
+ sctp_chunk_free(asconf);
+ else
+ asoc->addip_last_asconf = asconf;
+ }
+
return retval;
}
}
-/* Send the next ASCONF packet currently stored in the association.
- * This happens after the ASCONF_ACK was successfully processed.
- */
-static void sctp_cmd_send_asconf(struct sctp_association *asoc)
-{
- /* Send the next asconf chunk from the addip chunk
- * queue.
- */
- if (!list_empty(&asoc->addip_chunk_list)) {
- struct list_head *entry = asoc->addip_chunk_list.next;
- struct sctp_chunk *asconf = list_entry(entry,
- struct sctp_chunk, list);
- list_del_init(entry);
-
- /* Hold the chunk until an ASCONF_ACK is received. */
- sctp_chunk_hold(asconf);
- if (sctp_primitive_ASCONF(asoc, asconf))
- sctp_chunk_free(asconf);
- else
- asoc->addip_last_asconf = asconf;
- }
-}
-
/* These three macros allow us to pull the debugging code out of the
* main flow of sctp_do_sm() to keep attention focused on the real
}
error = sctp_cmd_send_msg(asoc, cmd->obj.msg);
break;
- case SCTP_CMD_SEND_NEXT_ASCONF:
- sctp_cmd_send_asconf(asoc);
- break;
default:
printk(KERN_WARNING "Impossible command: %u, %p\n",
cmd->verb, cmd->obj.ptr);
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
- asconf_ack)) {
- /* Successfully processed ASCONF_ACK. We can
- * release the next asconf if we have one.
- */
- sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
- SCTP_NULL());
+ asconf_ack))
return SCTP_DISPOSITION_CONSUME;
- }
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(sctp_errhdr_t));
if (!cd || !try_module_get(cd->owner))
return -EACCES;
han = __seq_open_private(file, &cache_content_op, sizeof(*han));
- if (han == NULL) {
- module_put(cd->owner);
+ if (han == NULL)
return -ENOMEM;
- }
han->cd = cd;
return 0;
save_callback = task->tk_callback;
task->tk_callback = NULL;
save_callback(task);
- } else {
- /*
- * Perform the next FSM step.
- * tk_action may be NULL when the task has been killed
- * by someone else.
- */
+ }
+
+ /*
+ * Perform the next FSM step.
+ * tk_action may be NULL when the task has been killed
+ * by someone else.
+ */
+ if (!RPC_IS_QUEUED(task)) {
if (task->tk_action == NULL)
break;
task->tk_action(task);
if (sk == NULL)
return;
- transport->srcport = 0;
-
write_lock_bh(&sk->sk_callback_lock);
transport->inet = NULL;
transport->sock = NULL;
if (!(xprt = xprt_from_sock(sk)))
goto out;
dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
- dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n",
+ dprintk("RPC: state %x conn %d dead %d zapped %d\n",
sk->sk_state, xprt_connected(xprt),
sock_flag(sk, SOCK_DEAD),
- sock_flag(sk, SOCK_ZAPPED),
- sk->sk_shutdown);
+ sock_flag(sk, SOCK_ZAPPED));
switch (sk->sk_state) {
case TCP_ESTABLISHED:
{
unsigned int state = transport->inet->sk_state;
- if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED) {
- /* we don't need to abort the connection if the socket
- * hasn't undergone a shutdown
- */
- if (transport->inet->sk_shutdown == 0)
- return;
- dprintk("RPC: %s: TCP_CLOSEd and sk_shutdown set to %d\n",
- __func__, transport->inet->sk_shutdown);
- }
- if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT)) {
- /* we don't need to abort the connection if the socket
- * hasn't undergone a shutdown
- */
- if (transport->inet->sk_shutdown == 0)
- return;
- dprintk("RPC: %s: ESTABLISHED/SYN_SENT "
- "sk_shutdown set to %d\n",
- __func__, transport->inet->sk_shutdown);
- }
+ if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED)
+ return;
+ if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT))
+ return;
xs_abort_connection(xprt, transport);
}
struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
struct tipc_sock *tsock = tipc_sk(sock->sk);
- memset(addr, 0, sizeof(*addr));
if (peer) {
if ((sock->state != SS_CONNECTED) &&
((peer != 2) || (sock->state != SS_DISCONNECTING)))
int, int);
static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
struct msghdr *, size_t);
-static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
- struct msghdr *, size_t, int);
static const struct proto_ops unix_stream_ops = {
.family = PF_UNIX,
.setsockopt = sock_no_setsockopt,
.getsockopt = sock_no_getsockopt,
.sendmsg = unix_seqpacket_sendmsg,
- .recvmsg = unix_seqpacket_recvmsg,
+ .recvmsg = unix_dgram_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
sock_wfree(skb);
}
-#define MAX_RECURSION_LEVEL 4
-
static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
int i;
- unsigned char max_level = 0;
- int unix_sock_count = 0;
-
- for (i = scm->fp->count - 1; i >= 0; i--) {
- struct sock *sk = unix_get_socket(scm->fp->fp[i]);
-
- if (sk) {
- unix_sock_count++;
- max_level = max(max_level,
- unix_sk(sk)->recursion_level);
- }
- }
- if (unlikely(max_level > MAX_RECURSION_LEVEL))
- return -ETOOMANYREFS;
/*
* Need to duplicate file references for the sake of garbage
if (!UNIXCB(skb).fp)
return -ENOMEM;
- if (unix_sock_count) {
- for (i = scm->fp->count - 1; i >= 0; i--)
- unix_inflight(scm->fp->fp[i]);
- }
+ for (i = scm->fp->count-1; i >= 0; i--)
+ unix_inflight(scm->fp->fp[i]);
skb->destructor = unix_destruct_fds;
- return max_level;
+ return 0;
}
/*
struct sk_buff *skb;
long timeo;
struct scm_cookie tmp_scm;
- int max_level = 0;
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
if (siocb->scm->fp) {
err = unix_attach_fds(siocb->scm, skb);
- if (err < 0)
+ if (err)
goto out_free;
- max_level = err + 1;
}
unix_get_secdata(siocb->scm, skb);
}
skb_queue_tail(&other->sk_receive_queue, skb);
- if (max_level > unix_sk(other)->recursion_level)
- unix_sk(other)->recursion_level = max_level;
unix_state_unlock(other);
other->sk_data_ready(other, len);
sock_put(other);
int sent = 0;
struct scm_cookie tmp_scm;
bool fds_sent = false;
- int max_level = 0;
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
/* Only send the fds in the first buffer */
if (siocb->scm->fp && !fds_sent) {
err = unix_attach_fds(siocb->scm, skb);
- if (err < 0) {
+ if (err) {
kfree_skb(skb);
goto out_err;
}
- max_level = err + 1;
fds_sent = true;
}
goto pipe_err_free;
skb_queue_tail(&other->sk_receive_queue, skb);
- if (max_level > unix_sk(other)->recursion_level)
- unix_sk(other)->recursion_level = max_level;
unix_state_unlock(other);
other->sk_data_ready(other, size);
sent += size;
return unix_dgram_sendmsg(kiocb, sock, msg, len);
}
-static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size,
- int flags)
-{
- struct sock *sk = sock->sk;
-
- if (sk->sk_state != TCP_ESTABLISHED)
- return -ENOTCONN;
-
- return unix_dgram_recvmsg(iocb, sock, msg, size, flags);
-}
-
static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
struct unix_sock *u = unix_sk(sk);
unix_state_lock(sk);
skb = skb_dequeue(&sk->sk_receive_queue);
if (skb == NULL) {
- unix_sk(sk)->recursion_level = 0;
if (copied >= target)
goto unlock;
unsigned int unix_tot_inflight;
-struct sock *unix_get_socket(struct file *filp)
+static struct sock *unix_get_socket(struct file *filp)
{
struct sock *u_sock = NULL;
struct inode *inode = filp->f_path.dentry->d_inode;
}
static bool gc_in_progress = false;
-#define UNIX_INFLIGHT_TRIGGER_GC 16000
void wait_for_unix_gc(void)
{
- /*
- * If number of inflight sockets is insane,
- * force a garbage collect right now.
- */
- if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
- unix_gc();
wait_event(unix_gc_wait, gc_in_progress == false);
}
write_lock_bh(&x25_neigh_list_lock);
list_for_each_safe(entry, tmp, &x25_neigh_list) {
- struct net_device *dev;
-
nb = list_entry(entry, struct x25_neigh, node);
- dev = nb->dev;
__x25_remove_neigh(nb);
- dev_put(dev);
}
write_unlock_bh(&x25_neigh_list_lock);
}
}
if (!child)
continue;
- if (line[0] && line[strlen(line) - 1] == '?') {
+ if (line[strlen(line) - 1] == '?') {
print_help(child);
continue;
}
sid = tsec->sid;
newsid = tsec->create_sid;
- if ((sbsec->flags & SE_SBINITIALIZED) &&
- (sbsec->behavior == SECURITY_FS_USE_MNTPOINT))
- newsid = sbsec->mntpoint_sid;
- else if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
+ if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
rc = security_transition_sid(sid, dsec->sid,
inode_mode_to_security_class(inode->i_mode),
&newsid);
{
struct task_security_struct *tsec = cred->security;
- /*
- * cred->security == NULL if security_cred_alloc_blank() or
- * security_prepare_creds() returned an error.
- */
- BUG_ON(cred->security && (unsigned long) cred->security < PAGE_SIZE);
+ BUG_ON((unsigned long) cred->security < PAGE_SIZE);
cred->security = (void *) 0x7UL;
kfree(tsec);
}
{ RTM_NEWADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
{ RTM_DELADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
{ RTM_GETADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_READ },
- { RTM_GETDCB, NETLINK_ROUTE_SOCKET__NLMSG_READ },
- { RTM_SETDCB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
};
static struct nlmsg_perm nlmsg_firewall_perms[] =
{
struct snd_hrtimer *stime = container_of(hrt, struct snd_hrtimer, hrt);
struct snd_timer *t = stime->timer;
- unsigned long oruns;
if (!atomic_read(&stime->running))
return HRTIMER_NORESTART;
- oruns = hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution));
- snd_timer_interrupt(stime->timer, t->sticks * oruns);
+ hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution));
+ snd_timer_interrupt(stime->timer, t->sticks);
if (!atomic_read(&stime->running))
return HRTIMER_NORESTART;
return -ENOMEM;
mfile->file = file;
mfile->disconnected_f_op = NULL;
- INIT_LIST_HEAD(&mfile->shutdown_list);
spin_lock(&card->files_lock);
if (card->shutdown) {
spin_unlock(&card->files_lock);
list_for_each_entry(mfile, &card->files_list, list) {
if (mfile->file == file) {
list_del(&mfile->list);
- spin_lock(&shutdown_lock);
- list_del(&mfile->shutdown_list);
- spin_unlock(&shutdown_lock);
if (mfile->disconnected_f_op)
fops_put(mfile->disconnected_f_op);
found = mfile;
void (*reset) (int dev);
void (*hw_control) (int dev, unsigned char *event);
int (*load_patch) (int dev, int format, const char __user *addr,
- int count, int pmgr_flag);
+ int offs, int count, int pmgr_flag);
void (*aftertouch) (int dev, int voice, int pressure);
void (*controller) (int dev, int voice, int ctrl_num, int value);
void (*panning) (int dev, int voice, int value);
int
midi_synth_load_patch(int dev, int format, const char __user *addr,
- int count, int pmgr_flag)
+ int offs, int count, int pmgr_flag)
{
int orig_dev = synth_devs[dev]->midi_dev;
if (!prefix_cmd(orig_dev, 0xf0))
return 0;
- /* Invalid patch format */
if (format != SYSEX_PATCH)
+ {
+/* printk("MIDI Error: Invalid patch format (key) 0x%x\n", format);*/
return -EINVAL;
-
- /* Patch header too short */
+ }
if (count < hdr_size)
+ {
+/* printk("MIDI Error: Patch header too short\n");*/
return -EINVAL;
-
+ }
count -= hdr_size;
/*
- * Copy the header from user space
+ * Copy the header from user space but ignore the first bytes which have
+ * been transferred already.
*/
- if (copy_from_user(&sysex, addr, hdr_size))
+ if(copy_from_user(&((char *) &sysex)[offs], &(addr)[offs], hdr_size - offs))
return -EFAULT;
-
- /* Sysex record too short */
- if ((unsigned)count < (unsigned)sysex.len)
+
+ if (count < sysex.len)
+ {
+/* printk(KERN_WARNING "MIDI Warning: Sysex record too short (%d<%d)\n", count, (int) sysex.len);*/
sysex.len = count;
-
- left = sysex.len;
- src_offs = 0;
+ }
+ left = sysex.len;
+ src_offs = 0;
for (i = 0; i < left && !signal_pending(current); i++)
{
unsigned char data;
- if (get_user(data,
- (unsigned char __user *)(addr + hdr_size + i)))
- return -EFAULT;
+ get_user(*(unsigned char *) &data, (unsigned char __user *) &((addr)[hdr_size + i]));
eox_seen = (i > 0 && data & 0x80); /* End of sysex */
void midi_synth_close (int dev);
void midi_synth_hw_control (int dev, unsigned char *event);
int midi_synth_load_patch (int dev, int format, const char __user * addr,
- int count, int pmgr_flag);
+ int offs, int count, int pmgr_flag);
void midi_synth_panning (int dev, int channel, int pressure);
void midi_synth_aftertouch (int dev, int channel, int pressure);
void midi_synth_controller (int dev, int channel, int ctrl_num, int value);
}
static int opl3_load_patch(int dev, int format, const char __user *addr,
- int count, int pmgr_flag)
+ int offs, int count, int pmgr_flag)
{
struct sbi_instrument ins;
return -EINVAL;
}
- if (copy_from_user(&ins, addr, sizeof(ins)))
+ /*
+ * What the fuck is going on here? We leave junk in the beginning
+ * of ins and then check the field pretty close to that beginning?
+ */
+ if(copy_from_user(&((char *) &ins)[offs], addr + offs, sizeof(ins) - offs))
return -EFAULT;
if (ins.channel < 0 || ins.channel >= SBFM_MAXINSTR)
static void opl3_panning(int dev, int voice, int value)
{
-
- if (voice < 0 || voice >= devc->nr_voice)
- return;
-
devc->voc[voice].panning = value;
}
static void opl3_setup_voice(int dev, int voice, int chn)
{
- struct channel_info *info;
-
- if (voice < 0 || voice >= devc->nr_voice)
- return;
-
- if (chn < 0 || chn > 15)
- return;
-
- info = &synth_devs[dev]->chn_info[chn];
+ struct channel_info *info =
+ &synth_devs[dev]->chn_info[chn];
opl3_set_instr(dev, voice, info->pgm_num);
return -ENXIO;
fmt = (*(short *) &event_rec[0]) & 0xffff;
- err = synth_devs[dev]->load_patch(dev, fmt, buf + p, c, 0);
+ err = synth_devs[dev]->load_patch(dev, fmt, buf, p + 4, c, 0);
if (err < 0)
return err;
.rate_min = 5000,
.rate_max = 48000,
.channels_min = 1,
+#ifdef CHIP_AU8830
+ .channels_max = 4,
+#else
.channels_max = 2,
+#endif
.buffer_bytes_max = 0x10000,
.period_bytes_min = 0x1,
.period_bytes_max = 0x1000,
.periods_max = 64,
};
#endif
-#ifdef CHIP_AU8830
-static unsigned int au8830_channels[3] = {
- 1, 2, 4,
-};
-
-static struct snd_pcm_hw_constraint_list hw_constraints_au8830_channels = {
- .count = ARRAY_SIZE(au8830_channels),
- .list = au8830_channels,
- .mask = 0,
-};
-#endif
/* open callback */
static int snd_vortex_pcm_open(struct snd_pcm_substream *substream)
{
if (VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_ADB
|| VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_I2S)
runtime->hw = snd_vortex_playback_hw_adb;
-#ifdef CHIP_AU8830
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
- VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_ADB) {
- runtime->hw.channels_max = 4;
- snd_pcm_hw_constraint_list(runtime, 0,
- SNDRV_PCM_HW_PARAM_CHANNELS,
- &hw_constraints_au8830_channels);
- }
-#endif
substream->runtime->private_data = NULL;
}
#ifndef CHIP_AU8810
mutex_lock(&atc->atc_mutex);
dao->ops->get_spos(dao, &status);
if (((status >> 24) & IEC958_AES3_CON_FS) != iec958_con_fs) {
- status &= ~(IEC958_AES3_CON_FS << 24);
+ status &= ((~IEC958_AES3_CON_FS) << 24);
status |= (iec958_con_fs << 24);
dao->ops->set_spos(dao, status);
dao->ops->commit_write(dao);
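
The two mask expressions in the hunk above are not equivalent: complementing the FS field before shifting zeroes everything below bit 28, while complementing after the shift clears only the four FS bits in byte 3. A small sketch with a hypothetical 4-bit mask standing in for IEC958_AES3_CON_FS:

/* Sketch of why the two masks differ; FS_MASK is a hypothetical 4-bit
 * mask, not the actual IEC958_AES3_CON_FS definition. */
#include <stdio.h>

#define FS_MASK 0x0fu

int main(void)
{
	unsigned int a = ~(FS_MASK << 24);	/* 0xf0ffffff: clears bits 24-27 only */
	unsigned int b = (~FS_MASK) << 24;	/* 0xf0000000: clears bits 0-27 */

	printf("~(m << 24) = 0x%08x\n(~m) << 24 = 0x%08x\n", a, b);
	return 0;
}
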
if (!entry)
return -ENOMEM;
- dao->ops->clear_left_input(dao);
/* Program master and conjugate resources */
input->ops->master(input);
daio->rscl.ops->master(&daio->rscl);
if (!entry)
return -ENOMEM;
- dao->ops->clear_right_input(dao);
/* Program master and conjugate resources */
input->ops->master(input);
daio->rscr.ops->master(&daio->rscr);
return 0;
}
+static int ct_spdif_default_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ unsigned int status = SNDRV_PCM_DEFAULT_CON_SPDIF;
+
+ ucontrol->value.iec958.status[0] = (status >> 0) & 0xff;
+ ucontrol->value.iec958.status[1] = (status >> 8) & 0xff;
+ ucontrol->value.iec958.status[2] = (status >> 16) & 0xff;
+ ucontrol->value.iec958.status[3] = (status >> 24) & 0xff;
+
+ return 0;
+}
+
static int ct_spdif_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
unsigned int status;
atc->spdif_out_get_status(atc, &status);
-
- if (status == 0)
- status = SNDRV_PCM_DEFAULT_CON_SPDIF;
-
ucontrol->value.iec958.status[0] = (status >> 0) & 0xff;
ucontrol->value.iec958.status[1] = (status >> 8) & 0xff;
ucontrol->value.iec958.status[2] = (status >> 16) & 0xff;
.name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
.count = 1,
.info = ct_spdif_info,
- .get = ct_spdif_get,
+ .get = ct_spdif_default_get,
.put = ct_spdif_put,
.private_value = MIXER_IEC958_DEFAULT
};
#define ES_REG_1371_CODEC 0x14 /* W/R: Codec Read/Write register address */
#define ES_1371_CODEC_RDY (1<<31) /* codec ready */
#define ES_1371_CODEC_WIP (1<<30) /* codec register access in progress */
-#define EV_1938_CODEC_MAGIC (1<<26)
#define ES_1371_CODEC_PIRD (1<<23) /* codec read/write select register */
#define ES_1371_CODEC_WRITE(a,d) ((((a)&0x7f)<<16)|(((d)&0xffff)<<0))
#define ES_1371_CODEC_READS(a) ((((a)&0x7f)<<16)|ES_1371_CODEC_PIRD)
#ifdef CHIP1371
-static inline bool is_ev1938(struct ensoniq *ensoniq)
-{
- return ensoniq->pci->device == 0x8938;
-}
-
static void snd_es1371_codec_write(struct snd_ac97 *ac97,
unsigned short reg, unsigned short val)
{
struct ensoniq *ensoniq = ac97->private_data;
- unsigned int t, x, flag;
+ unsigned int t, x;
- flag = is_ev1938(ensoniq) ? EV_1938_CODEC_MAGIC : 0;
mutex_lock(&ensoniq->src_mutex);
for (t = 0; t < POLL_COUNT; t++) {
if (!(inl(ES_REG(ensoniq, 1371_CODEC)) & ES_1371_CODEC_WIP)) {
0x00010000)
break;
}
- outl(ES_1371_CODEC_WRITE(reg, val) | flag,
- ES_REG(ensoniq, 1371_CODEC));
+ outl(ES_1371_CODEC_WRITE(reg, val), ES_REG(ensoniq, 1371_CODEC));
/* restore SRC reg */
snd_es1371_wait_src_ready(ensoniq);
outl(x, ES_REG(ensoniq, 1371_SMPRATE));
unsigned short reg)
{
struct ensoniq *ensoniq = ac97->private_data;
- unsigned int t, x, flag, fail = 0;
+ unsigned int t, x, fail = 0;
- flag = is_ev1938(ensoniq) ? EV_1938_CODEC_MAGIC : 0;
__again:
mutex_lock(&ensoniq->src_mutex);
for (t = 0; t < POLL_COUNT; t++) {
0x00010000)
break;
}
- outl(ES_1371_CODEC_READS(reg) | flag,
- ES_REG(ensoniq, 1371_CODEC));
+ outl(ES_1371_CODEC_READS(reg), ES_REG(ensoniq, 1371_CODEC));
/* restore SRC reg */
snd_es1371_wait_src_ready(ensoniq);
outl(x, ES_REG(ensoniq, 1371_SMPRATE));
/* now wait for the stinkin' data (RDY) */
for (t = 0; t < POLL_COUNT; t++) {
if ((x = inl(ES_REG(ensoniq, 1371_CODEC))) & ES_1371_CODEC_RDY) {
- if (is_ev1938(ensoniq)) {
- for (t = 0; t < 100; t++)
- inl(ES_REG(ensoniq, CONTROL));
- x = inl(ES_REG(ensoniq, 1371_CODEC));
- }
mutex_unlock(&ensoniq->src_mutex);
return ES_1371_CODEC_READ(x);
}
snd_print_pcm_rates(a->rates, buf, sizeof(buf));
if (a->format == AUDIO_CODING_TYPE_LPCM)
- snd_print_pcm_bits(a->sample_bits, buf2 + 8, sizeof(buf2) - 8);
+ snd_print_pcm_bits(a->sample_bits, buf2 + 8, sizeof(buf2 - 8));
else if (a->max_bitrate)
snprintf(buf2, sizeof(buf2),
", max bitrate = %d", a->max_bitrate);
SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
- SND_PCI_QUIRK(0x1043, 0x8410, "ASUS", POS_FIX_LPIB),
SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB),
/* available models */
enum {
- CS420X_MBP53,
CS420X_MBP55,
- CS420X_IMAC27,
CS420X_AUTO,
CS420X_MODELS
};
AC_VERB_SET_PIN_WIDGET_CONTROL,
hp_present ? 0 : PIN_OUT);
}
- if (spec->board_config == CS420X_MBP53 ||
- spec->board_config == CS420X_MBP55 ||
- spec->board_config == CS420X_IMAC27) {
+ if (spec->board_config == CS420X_MBP55) {
unsigned int gpio = hp_present ? 0x02 : 0x08;
snd_hda_codec_write(codec, 0x01, 0,
AC_VERB_SET_GPIO_DATA, gpio);
}
static const char *cs420x_models[CS420X_MODELS] = {
- [CS420X_MBP53] = "mbp53",
[CS420X_MBP55] = "mbp55",
- [CS420X_IMAC27] = "imac27",
[CS420X_AUTO] = "auto",
};
static struct snd_pci_quirk cs420x_cfg_tbl[] = {
- SND_PCI_QUIRK(0x10de, 0x0ac0, "MacBookPro 5,3", CS420X_MBP53),
- SND_PCI_QUIRK(0x10de, 0x0d94, "MacBookAir 3,1(2)", CS420X_MBP55),
SND_PCI_QUIRK(0x10de, 0xcb79, "MacBookPro 5,5", CS420X_MBP55),
- SND_PCI_QUIRK(0x10de, 0xcb89, "MacBookPro 7,1", CS420X_MBP55),
- SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),
{} /* terminator */
};
u32 val;
};
-static struct cs_pincfg mbp53_pincfgs[] = {
- { 0x09, 0x012b4050 },
- { 0x0a, 0x90100141 },
- { 0x0b, 0x90100140 },
- { 0x0c, 0x018b3020 },
- { 0x0d, 0x90a00110 },
- { 0x0e, 0x400000f0 },
- { 0x0f, 0x01cbe030 },
- { 0x10, 0x014be060 },
- { 0x12, 0x400000f0 },
- { 0x15, 0x400000f0 },
- {} /* terminator */
-};
-
static struct cs_pincfg mbp55_pincfgs[] = {
{ 0x09, 0x012b4030 },
{ 0x0a, 0x90100121 },
{} /* terminator */
};
-static struct cs_pincfg imac27_pincfgs[] = {
- { 0x09, 0x012b4050 },
- { 0x0a, 0x90100140 },
- { 0x0b, 0x90100142 },
- { 0x0c, 0x018b3020 },
- { 0x0d, 0x90a00110 },
- { 0x0e, 0x400000f0 },
- { 0x0f, 0x01cbe030 },
- { 0x10, 0x014be060 },
- { 0x12, 0x01ab9070 },
- { 0x15, 0x400000f0 },
- {} /* terminator */
-};
-
static struct cs_pincfg *cs_pincfgs[CS420X_MODELS] = {
- [CS420X_MBP53] = mbp53_pincfgs,
[CS420X_MBP55] = mbp55_pincfgs,
- [CS420X_IMAC27] = imac27_pincfgs,
};
static void fix_pincfg(struct hda_codec *codec, int model)
fix_pincfg(codec, spec->board_config);
switch (spec->board_config) {
- case CS420X_IMAC27:
- case CS420X_MBP53:
case CS420X_MBP55:
/* GPIO1 = headphones */
/* GPIO3 = speakers */
struct conexant_spec *spec;
struct conexant_jack *jack;
const char *name;
- int i, err;
+ int err;
spec = codec->spec;
snd_array_init(&spec->jacks, sizeof(*jack), 32);
-
- jack = spec->jacks.list;
- for (i = 0; i < spec->jacks.used; i++, jack++)
- if (jack->nid == nid)
- return 0 ; /* already present */
-
jack = snd_array_new(&spec->jacks);
name = (type == SND_JACK_HEADPHONE) ? "Headphone" : "Mic" ;
case 0x10ec0883:
case 0x10ec0885:
case 0x10ec0887:
- /*case 0x10ec0889:*/ /* this causes an SPDIF problem */
+ case 0x10ec0889:
alc889_coef_init(codec);
break;
case 0x10ec0888:
struct sigmatel_spec *spec = codec->spec;
unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
const struct hda_input_mux *imux = spec->input_mux;
- unsigned int idx, prev_idx, didx;
+ unsigned int idx, prev_idx;
idx = ucontrol->value.enumerated.item[0];
if (idx >= imux->num_items)
snd_hda_codec_write_cache(codec, spec->mux_nids[adc_idx], 0,
AC_VERB_SET_CONNECT_SEL,
imux->items[idx].index);
- if (prev_idx >= spec->num_analog_muxes &&
- spec->mux_nids[adc_idx] != spec->dmux_nids[adc_idx]) {
+ if (prev_idx >= spec->num_analog_muxes) {
imux = spec->dinput_mux;
/* 0 = analog */
snd_hda_codec_write_cache(codec,
}
} else {
imux = spec->dinput_mux;
- /* first dimux item is hardcoded to select analog imux,
- * so let's skip it
- */
- didx = idx - spec->num_analog_muxes + 1;
snd_hda_codec_write_cache(codec, spec->dmux_nids[adc_idx], 0,
AC_VERB_SET_CONNECT_SEL,
- imux->items[didx].index);
+ imux->items[idx - 1].index);
}
spec->cur_mux[adc_idx] = idx;
return 1;
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/string.h>
#include <sound/core.h>
#include <asm/io.h>
#include <asm/irq.h>
#define DBG(fmt...)
#endif
-#define IS_G4DA (machine_is_compatible("PowerMac3,4"))
-
/* i2c address for tumbler */
#define TAS_I2C_ADDR 0x34
gp->inactive_val = (*base) ? 0x4 : 0x5;
} else {
const u32 *prop = NULL;
- gp->active_state = IS_G4DA
- && !strncmp(device, "keywest-gpio1", 13);
+ gp->active_state = 0;
gp->active_val = 0x4;
gp->inactive_val = 0x5;
/* Here are some crude hacks to extract the GPIO polarity and
if (irq <= NO_IRQ)
irq = tumbler_find_device("line-output-detect",
NULL, &mix->line_detect, 1);
- if (IS_G4DA && irq <= NO_IRQ)
- irq = tumbler_find_device("keywest-gpio16",
- NULL, &mix->line_detect, 1);
mix->lineout_irq = irq;
tumbler_reset_audio(chip);
pr_debug("%s : sport %d\n", __func__, dai->id);
if (!dai->active)
return 0;
- if (dai->capture_active)
+ if (dai->capture.active)
sport_rx_stop(sport);
- if (dai->playback_active)
+ if (dai->playback.active)
sport_tx_stop(sport);
return 0;
}
WM8990_VMIDTOG);
/* Delay to allow output caps to discharge */
- msleep(300);
+ msleep(msecs_to_jiffies(300));
/* Disable VMIDTOG */
snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
/* Enable outputs */
snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1b00);
- msleep(50);
+ msleep(msecs_to_jiffies(50));
/* Enable VMID at 2x50k */
snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f02);
- msleep(100);
+ msleep(msecs_to_jiffies(100));
/* Enable VREF */
snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f03);
- msleep(600);
+ msleep(msecs_to_jiffies(600));
/* Enable BUFIOEN */
snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
/* Disable VMID */
snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f01);
- msleep(300);
+ msleep(msecs_to_jiffies(300));
/* Enable all output discharge bits */
snd_soc_write(codec, WM8990_ANTIPOP1, WM8990_DIS_LLINE |
{ "SPKL", "Input Switch", "MIXINL" },
{ "SPKL", "IN1LP Switch", "IN1LP" },
- { "SPKL", "Output Switch", "Left Output PGA" },
+ { "SPKL", "Output Switch", "Left Output Mixer" },
{ "SPKL", NULL, "TOCLK" },
{ "SPKR", "Input Switch", "MIXINR" },
{ "SPKR", "IN1RP Switch", "IN1RP" },
- { "SPKR", "Output Switch", "Right Output PGA" },
+ { "SPKR", "Output Switch", "Right Output Mixer" },
{ "SPKR", NULL, "TOCLK" },
{ "SPKL Boost", "Direct Voice Switch", "Direct Voice" },
{ "SPKOUTRP", NULL, "SPKR Driver" },
{ "SPKOUTRN", NULL, "SPKR Driver" },
- { "Left Headphone Mux", "Mixer", "Left Output PGA" },
- { "Right Headphone Mux", "Mixer", "Right Output PGA" },
+ { "Left Headphone Mux", "Mixer", "Left Output Mixer" },
+ { "Right Headphone Mux", "Mixer", "Right Output Mixer" },
{ "Headphone PGA", NULL, "Left Headphone Mux" },
{ "Headphone PGA", NULL, "Right Headphone Mux" },
strcpy(hw->name, SNDRV_EMUX_HWDEP_NAME);
hw->iface = SNDRV_HWDEP_IFACE_EMUX_WAVETABLE;
hw->ops.ioctl = snd_emux_hwdep_ioctl;
- /* The ioctl parameter types are compatible between 32- and
- * 64-bit architectures, so use the same function. */
- hw->ops.ioctl_compat = snd_emux_hwdep_ioctl;
hw->exclusive = 1;
hw->private_data = emu;
if ((err = snd_card_register(emu->card)) < 0)
}
dev->pcm->private_data = dev;
- strlcpy(dev->pcm->name, dev->product_name, sizeof(dev->pcm->name));
+ strcpy(dev->pcm->name, dev->product_name);
memset(dev->sub_playback, 0, sizeof(dev->sub_playback));
memset(dev->sub_capture, 0, sizeof(dev->sub_capture));
if (ret < 0)
return ret;
- strlcpy(rmidi->name, device->product_name, sizeof(rmidi->name));
+ strcpy(rmidi->name, device->product_name);
rmidi->info_flags = SNDRV_RAWMIDI_INFO_DUPLEX;
rmidi->private_data = device;
struct file *file, poll_table *wait)
{
struct us122l *us122l = hw->private_data;
+ struct usb_stream *s = us122l->sk.s;
unsigned *polled;
unsigned int mask;
poll_wait(file, &us122l->sk.sleep, wait);
- mask = POLLIN | POLLOUT | POLLWRNORM | POLLERR;
- if (mutex_trylock(&us122l->mutex)) {
- struct usb_stream *s = us122l->sk.s;
- if (s && s->state == usb_stream_ready) {
- if (us122l->first == file)
- polled = &s->periods_polled;
- else
- polled = &us122l->second_periods_polled;
- if (*polled != s->periods_done) {
- *polled = s->periods_done;
- mask = POLLIN | POLLOUT | POLLWRNORM;
- } else
- mask = 0;
+ switch (s->state) {
+ case usb_stream_ready:
+ if (us122l->first == file)
+ polled = &s->periods_polled;
+ else
+ polled = &us122l->second_periods_polled;
+ if (*polled != s->periods_done) {
+ *polled = s->periods_done;
+ mask = POLLIN | POLLOUT | POLLWRNORM;
+ break;
}
- mutex_unlock(&us122l->mutex);
+ /* Fall through */
+ mask = 0;
+ break;
+ default:
+ mask = POLLIN | POLLOUT | POLLWRNORM | POLLERR;
+ break;
}
return mask;
}
{
struct usb_stream_config *cfg;
struct us122l *us122l = hw->private_data;
- struct usb_stream *s;
unsigned min_period_frames;
int err = 0;
bool high_speed;
snd_power_wait(hw->card, SNDRV_CTL_POWER_D0);
mutex_lock(&us122l->mutex);
- s = us122l->sk.s;
if (!us122l->master)
us122l->master = file;
else if (us122l->master != file) {
- if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg))) {
+ if (memcmp(cfg, &us122l->sk.s->cfg, sizeof(*cfg))) {
err = -EIO;
goto unlock;
}
us122l->slave = file;
}
- if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg)) ||
- s->state == usb_stream_xrun) {
+ if (!us122l->sk.s ||
+ memcmp(cfg, &us122l->sk.s->cfg, sizeof(*cfg)) ||
+ us122l->sk.s->state == usb_stream_xrun) {
us122l_stop(us122l);
if (!us122l_start(us122l, cfg->sample_rate, cfg->period_frames))
err = -EIO;
mutex_unlock(&us122l->mutex);
free:
kfree(cfg);
- wake_up_all(&us122l->sk.sleep);
return err;
}
all::
# Define V=1 to have a more verbose compile.
-# Define V=2 to have an even more verbose compile.
#
# Define SNPRINTF_RETURNS_BOGUS if you are on a system whose snprintf()
# or vsnprintf() return -1 instead of the number of characters which would
# Define NO_EXTERNAL_GREP if you don't want "perf grep" to ever call
# your external grep (e.g., if your system lacks grep, if its grep is
# broken, or spawning an external process is slower than the built-in grep perf has).
-#
-# Define LDFLAGS=-static to build a static binary.
-#
-# Define EXTRA_CFLAGS=-m64 or EXTRA_CFLAGS=-m32 as appropriate for cross-builds.
PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
@$(SHELL_PATH) util/PERF-VERSION-GEN
uname_P := $(shell sh -c 'uname -p 2>/dev/null || echo not')
uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not')
+#
+# Add -m32 for cross-builds:
+#
+ifdef NO_64BIT
+ MBITS := -m32
+else
+ #
+ # If we're on a 64-bit kernel, use -m64:
+ #
+ ifneq ($(patsubst %64,%,$(uname_M)),$(uname_M))
+ MBITS := -m64
+ endif
+endif
+
# CFLAGS and LDFLAGS are for the users to override from the command line.
#
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-prototypes
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wdeclaration-after-statement
-ifeq ("$(origin DEBUG)", "command line")
- PERF_DEBUG = $(DEBUG)
-endif
-ifndef PERF_DEBUG
- CFLAGS_OPTIMIZE = -O6
-endif
-
-CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
-EXTLIBS = -lpthread -lrt -lelf -lm
+CFLAGS = $(MBITS) -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -fstack-protector-all -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS)
+LDFLAGS = -lpthread -lrt -lelf -lm
ALL_CFLAGS = $(CFLAGS)
ALL_LDFLAGS = $(LDFLAGS)
STRIP ?= strip
# explicitly what architecture to check for. Fix this up for yours..
SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__
-ifeq ($(V), 2)
- QUIET_STDERR = ">/dev/null"
-else
- QUIET_STDERR = ">/dev/null 2>&1"
-endif
-
-BITBUCKET = "/dev/null"
-
-ifneq ($(shell sh -c "(echo '\#include <stdio.h>'; echo 'int main(void) { return puts(\"hi\"); }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) "$(QUIET_STDERR)" && echo y"), y)
- BITBUCKET = .perf.dev.null
-endif
-
-ifeq ($(shell sh -c "echo 'int foo(void) {char X[2]; return 3;}' | $(CC) -x c -c -Werror -fstack-protector-all - -o $(BITBUCKET) "$(QUIET_STDERR)" && echo y"), y)
- CFLAGS := $(CFLAGS) -fstack-protector-all
-endif
### --- END CONFIGURATION SECTION ---
PTHREAD_LIBS =
endif
-ifneq ($(shell sh -c "(echo '\#include <gnu/libc-version.h>'; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
- msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static);
-endif
-
-ifeq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
- ifneq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
+ifeq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y)
+ ifneq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y)
BASIC_CFLAGS += -DLIBELF_NO_MMAP
endif
else
- msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel);
+ msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]);
endif
ifdef NO_DEMANGLE
BASIC_CFLAGS += -DNO_DEMANGLE
-else ifdef HAVE_CPLUS_DEMANGLE
- EXTLIBS += -liberty
- BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE
else
- has_bfd := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd "$(QUIET_STDERR)" && echo y")
+ has_bfd := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -lbfd > /dev/null 2>&1 && echo y")
ifeq ($(has_bfd),y)
EXTLIBS += -lbfd
else
- has_bfd_iberty := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty "$(QUIET_STDERR)" && echo y")
+ has_bfd_iberty := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -lbfd -liberty > /dev/null 2>&1 && echo y")
ifeq ($(has_bfd_iberty),y)
EXTLIBS += -lbfd -liberty
else
- has_bfd_iberty_z := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty -lz "$(QUIET_STDERR)" && echo y")
+ has_bfd_iberty_z := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -lbfd -liberty -lz > /dev/null 2>&1 && echo y")
ifeq ($(has_bfd_iberty_z),y)
EXTLIBS += -lbfd -liberty -lz
else
- has_cplus_demangle := $(shell sh -c "(echo 'extern char *cplus_demangle(const char *, int);'; echo 'int main(void) { cplus_demangle(0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -liberty "$(QUIET_STDERR)" && echo y")
+ has_cplus_demangle := $(shell sh -c "(echo 'extern char *cplus_demangle(const char *, int);'; echo 'int main(void) { cplus_demangle(0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -liberty > /dev/null 2>&1 && echo y")
ifeq ($(has_cplus_demangle),y)
EXTLIBS += -liberty
BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE
else
- msg := $(warning No bfd.h/libbfd found, install binutils-dev[el]/zlib-static to gain symbol demangling)
+ msg := $(warning No bfd.h/libbfd found, install binutils-dev[el] to gain symbol demangling)
BASIC_CFLAGS += -DNO_DEMANGLE
endif
endif
SHELL = $(SHELL_PATH)
-all:: .perf.dev.null shell_compatibility_test $(ALL_PROGRAMS) $(BUILT_INS) $(OTHER_PROGRAMS) PERF-BUILD-OPTIONS
+all:: shell_compatibility_test $(ALL_PROGRAMS) $(BUILT_INS) $(OTHER_PROGRAMS) PERF-BUILD-OPTIONS
ifneq (,$X)
$(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), test '$p' -ef '$p$X' || $(RM) '$p';)
endif
.PHONY: .FORCE-PERF-VERSION-FILE TAGS tags cscope .FORCE-PERF-CFLAGS
.PHONY: .FORCE-PERF-BUILD-OPTIONS
-.perf.dev.null:
- touch .perf.dev.null
-
-.INTERMEDIATE: .perf.dev.null
-
### Make sure built-ins do not have dups and listed in perf.c
#
check-builtins::