* wait for the flag to change, indicating this kernel is going away but
* the slave code for the next one is at addresses 0 to 100.
*
- * This is used by all slaves, even those that did not find a matching
- * paca in the secondary startup code.
+ * This is used by all slaves.
*
* Physical (hardware) cpu id should be in r3.
*/
1: mflr r5
addi r5,r5,kexec_flag-1b
+ li r4,KEXEC_STATE_REAL_MODE
+ stb r4,PACAKEXECSTATE(r13)
+ SYNC
+
99: HMT_LOW
#ifdef CONFIG_KEXEC /* use no memory without kexec */
lwz r4,0(r5)
*
* get phys id from paca
* switch to real mode
- * mark the paca as no longer used
* join other cpus in kexec_wait(phys_id)
*/
_GLOBAL(kexec_smp_wait)
lhz r3,PACAHWCPUID(r13)
bl real_mode
-
- li r4,KEXEC_STATE_REAL_MODE
- stb r4,PACAKEXECSTATE(r13)
- SYNC
-
b .kexec_wait
/*
return is_kernel;
}
-static bool pmc_overflow(unsigned long val)
-{
- if ((int)val < 0)
- return true;
-
- /*
- * Events on POWER7 can roll back if a speculative event doesn't
- * eventually complete. Unfortunately in some rare cases they will
- * raise a performance monitor exception. We need to catch this to
- * ensure we reset the PMC. In all cases the PMC will be 256 or less
- * cycles from overflow.
- *
- * We only do this if the first pass fails to find any overflowing
- * PMCs because a user might set a period of less than 256 and we
- * don't want to mistakenly reset them.
- */
- if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256))
- return true;
-
- return false;
-}
-
static void power4_handle_interrupt(struct pt_regs *regs,
struct op_counter_config *ctr)
{
for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
val = classic_ctr_read(i);
- if (pmc_overflow(val)) {
+ if (val < 0) {
if (oprofile_running && ctr[i].enabled) {
oprofile_add_ext_sample(pc, regs, i, is_kernel);
classic_ctr_write(i, reset_value[i]);
* Returns 0 if the range is valid, nonzero otherwise.
*
* This is equivalent to the following test:
- * (u33)addr + (u33)size > (u33)current->addr_limit.seg (u65 for x86_64)
+ * (u33)addr + (u33)size >= (u33)current->addr_limit.seg (u65 for x86_64)
*
* This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
*/
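As a cross-check of the carry-based test above, here is a minimal userspace sketch (a 32-bit address space and an illustrative limit are assumed; widening to u64 plays the role of the u33 carry):

#include <stdint.h>
#include <stdio.h>

/* Nonzero (invalid) when addr + size reaches or passes the limit; the
 * 64-bit widening cannot overflow, mirroring the u33 arithmetic. */
static int range_not_ok(uint32_t addr, uint32_t size, uint32_t limit)
{
	return (uint64_t)addr + size >= limit;
}

int main(void)
{
	/* illustrative limit; 0xFFFFFFFF + 2 wraps u32 but not u64 */
	printf("%d\n", range_not_ok(0xFFFFFFFFu, 2, 0xC0000000u));  /* 1 */
	printf("%d\n", range_not_ok(0x1000u, 0x100u, 0xC0000000u)); /* 0 */
	return 0;
}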
{
u8 *p = (u8 *)h;
u8 *end = p, flags = 0;
- u16 devid = 0, devid_start = 0, devid_to = 0;
- u32 dev_i, ext_flags = 0;
+ u16 dev_i, devid = 0, devid_start = 0, devid_to = 0;
+ u32 ext_flags = 0;
bool alias = false;
struct ivhd_entry *e;
/* Initializes the device->iommu mapping for the driver */
static int __init init_iommu_devices(struct amd_iommu *iommu)
{
- u32 i;
+ u16 i;
for (i = iommu->first_device; i <= iommu->last_device; ++i)
set_iommu_for_device(iommu, i);
*/
static void init_device_table(void)
{
- u32 devid;
+ u16 devid;
for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
set_dev_entry_bit(devid, DEV_ENTRY_VALID);
}
#endif
- /*
- * Family 0x12 and above processors have APIC timer
- * running in deep C states.
- */
- if (c->x86 > 0x11)
+ /* As a rule processors have APIC timer running in deep C states */
+ if (c->x86 > 0xf && !cpu_has_amd_erratum(amd_erratum_400))
set_cpu_cap(c, X86_FEATURE_ARAT);
/*
* Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
*/
u64 mask;
- int err;
- err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
- if (err == 0) {
- mask |= (1 << 10);
- checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
- }
+ rdmsrl(MSR_AMD64_MCx_MASK(4), mask);
+ mask |= (1 << 10);
+ wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
}
}
{
set_user_gs(regs, 0);
regs->fs = 0;
+ set_fs(USER_DS);
regs->ds = __USER_DS;
regs->es = __USER_DS;
regs->ss = __USER_DS;
regs->cs = __USER_CS;
regs->ss = __USER_DS;
regs->flags = 0x200;
+ set_fs(USER_DS);
/*
* Free the old FP and other extended state
*/
addq %rdx,%rcx
jc bad_to_user
cmpq TI_addr_limit(%rax),%rcx
- ja bad_to_user
+ jae bad_to_user
ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
CFI_ENDPROC
ENDPROC(copy_to_user)
addq %rdx,%rcx
jc bad_from_user
cmpq TI_addr_limit(%rax),%rcx
- ja bad_from_user
+ jae bad_from_user
ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
CFI_ENDPROC
ENDPROC(copy_from_user)
active_mm = percpu_read(cpu_tlbstate.active_mm);
- if (active_mm == mm && percpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
+ if (active_mm == mm)
leave_mm(smp_processor_id());
/* If this cpu still has a stale cr3 reference, then make sure
unsigned argidx = roundup(b->argidx, sizeof(u64));
BUG_ON(preemptible());
- BUG_ON(b->argidx >= MC_ARGS);
+ BUG_ON(b->argidx > MC_ARGS);
if (b->mcidx == MC_BATCH ||
- (argidx + args) >= MC_ARGS) {
+ (argidx + args) > MC_ARGS) {
mc_stats_flush(b->mcidx == MC_BATCH ? FL_SLOTS : FL_ARGS);
xen_mc_flush();
argidx = roundup(b->argidx, sizeof(u64));
ret.args = &b->args[argidx];
b->argidx = argidx + args;
- BUG_ON(b->argidx >= MC_ARGS);
+ BUG_ON(b->argidx > MC_ARGS);
return ret;
}
struct multicall_space ret = { NULL, NULL };
BUG_ON(preemptible());
- BUG_ON(b->argidx >= MC_ARGS);
+ BUG_ON(b->argidx > MC_ARGS);
if (b->mcidx == 0)
return ret;
if (b->entries[b->mcidx - 1].op != op)
return ret;
- if ((b->argidx + size) >= MC_ARGS)
+ if ((b->argidx + size) > MC_ARGS)
return ret;
ret.mc = &b->entries[b->mcidx - 1];
ret.args = &b->args[b->argidx];
b->argidx += size;
- BUG_ON(b->argidx >= MC_ARGS);
+ BUG_ON(b->argidx > MC_ARGS);
return ret;
}
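The boundary flips above (">= MC_ARGS" to "> MC_ARGS") reflect that the argument index may legally equal the capacity when the buffer is exactly full. A standalone sketch with an illustrative capacity:

#include <assert.h>
#include <stdio.h>

#define MC_ARGS 8	/* illustrative capacity, in argument slots */

static int alloc_args(unsigned int *argidx, unsigned int args)
{
	if (*argidx + args > MC_ARGS)	/* ">=" would reject a full, valid batch */
		return -1;
	*argidx += args;
	assert(*argidx <= MC_ARGS);	/* equal to MC_ARGS is still valid */
	return 0;
}

int main(void)
{
	unsigned int idx = 0;
	printf("%d\n", alloc_args(&idx, 8));	/* 0: exactly fills the buffer */
	printf("%d\n", alloc_args(&idx, 1));	/* -1: would overflow */
	return 0;
}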
trace_block_unplug_timer(q);
kblockd_schedule_work(q, &q->unplug_work);
}
-EXPORT_SYMBOL(blk_put_queue);
void blk_unplug(struct request_queue *q)
{
return 1;
}
-EXPORT_SYMBOL(blk_get_queue);
static inline void blk_free_request(struct request_queue *q, struct request *rq)
{
return rq;
}
- if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags) ||
- !q->elevator->ops->elevator_dispatch_fn(q, 0))
+ if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
return NULL;
}
}
if (ACPI_IS_ROOT_DEVICE(device)) {
acpi_add_id(device, ACPI_SYSTEM_HID);
break;
+ } else if (ACPI_IS_ROOT_DEVICE(device->parent)) {
+ /* \_SB_, the only root-level namespace device */
+ acpi_add_id(device, ACPI_BUS_HID);
+ strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME);
+ strcpy(device->pnp.device_class, ACPI_BUS_CLASS);
+ break;
}
status = acpi_get_object_info(device->handle, &info);
acpi_add_id(device, ACPI_BAY_HID);
else if (ACPI_SUCCESS(acpi_dock_match(device)))
acpi_add_id(device, ACPI_DOCK_HID);
- else if (!acpi_device_hid(device) &&
- ACPI_IS_ROOT_DEVICE(device->parent)) {
- acpi_add_id(device, ACPI_BUS_HID); /* \_SB, LNXSYBUS */
- strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME);
- strcpy(device->pnp.device_class, ACPI_BUS_CLASS);
- }
break;
case ACPI_BUS_TYPE_POWER:
* pata_cmd64x.c - CMD64x PATA for new ATA layer
* (C) 2005 Red Hat Inc
* Alan Cox <alan@lxorguk.ukuu.org.uk>
- * (C) 2009-2010 Bartlomiej Zolnierkiewicz
*
* Based upon
* linux/drivers/ide/pci/cmd64x.c Version 1.30 Sept 10, 2002
enum {
CFR = 0x50,
- CFR_INTR_CH0 = 0x04,
- CNTRL = 0x51,
- CNTRL_CH0 = 0x04,
- CNTRL_CH1 = 0x08,
+ CFR_INTR_CH0 = 0x02,
+ CNTRL = 0x51,
+ CNTRL_DIS_RA0 = 0x40,
+ CNTRL_DIS_RA1 = 0x80,
+ CNTRL_ENA_2ND = 0x08,
CMDTIM = 0x52,
ARTTIM0 = 0x53,
DRWTIM0 = 0x54,
ARTTIM23_DIS_RA2 = 0x04,
ARTTIM23_DIS_RA3 = 0x08,
ARTTIM23_INTR_CH1 = 0x10,
+ ARTTIM2 = 0x57,
+ ARTTIM3 = 0x57,
+ DRWTIM23 = 0x58,
DRWTIM2 = 0x58,
BRST = 0x59,
DRWTIM3 = 0x5b,
MRDMODE = 0x71,
MRDMODE_INTR_CH0 = 0x04,
MRDMODE_INTR_CH1 = 0x08,
+ MRDMODE_BLK_CH0 = 0x10,
+ MRDMODE_BLK_CH1 = 0x20,
BMIDESR0 = 0x72,
UDIDETCR0 = 0x73,
DTPR0 = 0x74,
BMIDECR1 = 0x78,
BMIDECSR = 0x79,
+ BMIDESR1 = 0x7A,
UDIDETCR1 = 0x7B,
DTPR1 = 0x7C
};
/* Now convert the clocks into values we can actually stuff into
the chip */
- if (t.recover == 16)
- t.recover = 0;
- else if (t.recover > 1)
+ if (t.recover > 1)
t.recover--;
else
t.recover = 15;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 dma_intr;
int dma_mask = ap->port_no ? ARTTIM23_INTR_CH1 : CFR_INTR_CH0;
- int dma_reg = ap->port_no ? ARTTIM23 : CFR;
+ int dma_reg = ap->port_no ? ARTTIM2 : CFR;
ata_bmdma_stop(qc);
static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ u32 class_rev;
+
static const struct ata_port_info cmd_info[6] = {
{ /* CMD 643 - no UDMA */
.flags = ATA_FLAG_SLAVE_POSS,
.port_ops = &cmd648_port_ops
}
};
- const struct ata_port_info *ppi[] = {
- &cmd_info[id->driver_data],
- &cmd_info[id->driver_data],
- NULL
- };
- u8 mrdmode, reg;
+ const struct ata_port_info *ppi[] = { &cmd_info[id->driver_data], NULL };
+ u8 mrdmode;
int rc;
- struct pci_dev *bridge = pdev->bus->self;
- /* mobility split bridges don't report enabled ports correctly */
- int port_ok = !(bridge && bridge->vendor ==
- PCI_VENDOR_ID_MOBILITY_ELECTRONICS);
- /* all (with exceptions below) apart from 643 have CNTRL_CH0 bit */
- int cntrl_ch0_ok = (id->driver_data != 0);
rc = pcim_enable_device(pdev);
if (rc)
return rc;
+ pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class_rev);
+ class_rev &= 0xFF;
+
if (id->driver_data == 0) /* 643 */
ata_pci_bmdma_clear_simplex(pdev);
if (pdev->device == PCI_DEVICE_ID_CMD_646) {
/* Does UDMA work ? */
- if (pdev->revision > 4) {
+ if (class_rev > 4)
ppi[0] = &cmd_info[2];
- ppi[1] = &cmd_info[2];
- }
/* Early rev with other problems ? */
- else if (pdev->revision == 1) {
+ else if (class_rev == 1)
ppi[0] = &cmd_info[3];
- ppi[1] = &cmd_info[3];
- }
- /* revs 1,2 have no CNTRL_CH0 */
- if (pdev->revision < 3)
- cntrl_ch0_ok = 0;
}
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
mrdmode |= 0x02; /* Memory read line enable */
pci_write_config_byte(pdev, MRDMODE, mrdmode);
- /* check for enabled ports */
- pci_read_config_byte(pdev, CNTRL, &reg);
- if (!port_ok)
- dev_printk(KERN_NOTICE, &pdev->dev, "Mobility Bridge detected, ignoring CNTRL port enable/disable\n");
- if (port_ok && cntrl_ch0_ok && !(reg & CNTRL_CH0)) {
- dev_printk(KERN_NOTICE, &pdev->dev, "Primary port is disabled\n");
- ppi[0] = &ata_dummy_port_info;
-
- }
- if (port_ok && !(reg & CNTRL_CH1)) {
- dev_printk(KERN_NOTICE, &pdev->dev, "Secondary port is disabled\n");
- ppi[1] = &ata_dummy_port_info;
- }
-
/* Force PIO 0 here.. */
/* PPC specific fixup copied from old driver */
const struct ata_port_info *ppi[] = { &info_hpt366, NULL };
void *hpriv = NULL;
+ u32 class_rev;
u32 reg1;
int rc;
if (rc)
return rc;
+ pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
+ class_rev &= 0xFF;
+
/* May be a later chip in disguise. Check */
/* Newer chips are not in the HPT36x driver. Ignore them */
- if (dev->revision > 2)
- return -ENODEV;
+ if (class_rev > 2)
+ return -ENODEV;
hpt36x_init_chipset(dev);
static const int MHz[4] = { 33, 40, 50, 66 };
void *private_data = NULL;
const struct ata_port_info *ppi[] = { NULL, NULL };
- u8 rev = dev->revision;
+
u8 irqmask;
+ u32 class_rev;
u8 mcr1;
u32 freq;
int prefer_dpll = 1;
if (rc)
return rc;
+ pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
+ class_rev &= 0xFF;
+
if (dev->device == PCI_DEVICE_ID_TTI_HPT366) {
/* May be a later chip in disguise. Check */
/* Older chips are in the HPT366 driver. Ignore them */
- if (rev < 3)
+ if (class_rev < 3)
return -ENODEV;
/* N series chips have their own driver. Ignore */
- if (rev == 6)
+ if (class_rev == 6)
return -ENODEV;
- switch(rev) {
+ switch(class_rev) {
case 3:
ppi[0] = &info_hpt370;
chip_table = &hpt370;
chip_table = &hpt372;
break;
default:
- printk(KERN_ERR "pata_hpt37x: Unknown HPT366 "
- "subtype, please report (%d).\n", rev);
+ printk(KERN_ERR "pata_hpt37x: Unknown HPT366 subtype please report (%d).\n", class_rev);
return -ENODEV;
}
} else {
switch(dev->device) {
case PCI_DEVICE_ID_TTI_HPT372:
/* 372N if rev >= 2*/
- if (rev >= 2)
+ if (class_rev >= 2)
return -ENODEV;
ppi[0] = &info_hpt372;
chip_table = &hpt372a;
break;
case PCI_DEVICE_ID_TTI_HPT302:
/* 302N if rev > 1 */
- if (rev > 1)
+ if (class_rev > 1)
return -ENODEV;
ppi[0] = &info_hpt372;
/* Check this */
chip_table = &hpt302;
break;
case PCI_DEVICE_ID_TTI_HPT371:
- if (rev > 1)
+ if (class_rev > 1)
return -ENODEV;
ppi[0] = &info_hpt372;
chip_table = &hpt371;
.port_ops = &hpt3x2n_port_ops
};
const struct ata_port_info *ppi[] = { &info, NULL };
- u8 rev = dev->revision;
+
u8 irqmask;
+ u32 class_rev;
+
unsigned int pci_mhz;
unsigned int f_low, f_high;
int adjust;
if (rc)
return rc;
+ pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
+ class_rev &= 0xFF;
+
switch(dev->device) {
case PCI_DEVICE_ID_TTI_HPT366:
- if (rev < 6)
+ if (class_rev < 6)
return -ENODEV;
break;
case PCI_DEVICE_ID_TTI_HPT371:
- if (rev < 2)
+ if (class_rev < 2)
return -ENODEV;
/* 371N if rev > 1 */
break;
case PCI_DEVICE_ID_TTI_HPT372:
/* 372N if rev >= 2*/
- if (rev < 2)
+ if (class_rev < 2)
return -ENODEV;
break;
case PCI_DEVICE_ID_TTI_HPT302:
- if (rev < 2)
+ if (class_rev < 2)
return -ENODEV;
break;
case PCI_DEVICE_ID_TTI_HPT372N:
static u8 sil680_init_chip(struct pci_dev *pdev, int *try_mmio)
{
+ u32 class_rev = 0;
u8 tmpbyte = 0;
+ pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class_rev);
+ class_rev &= 0xff;
/* FIXME: double check */
- pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
- pdev->revision ? 1 : 255);
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, (class_rev) ? 1 : 255);
pci_write_config_byte(pdev, 0x80, 0x00);
pci_write_config_byte(pdev, 0x84, 0x00);
struct kobject *kobj;
mutex_lock(&brd_devices_mutex);
- brd = brd_init_one(MINOR(dev) >> part_shift);
+ brd = brd_init_one(dev & MINORMASK);
kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM);
mutex_unlock(&brd_devices_mutex);
if (max_part > 0)
part_shift = fls(max_part);
- if ((1UL << part_shift) > DISK_MAX_PARTS)
- return -EINVAL;
-
if (rd_nr > 1UL << (MINORBITS - part_shift))
return -EINVAL;
if (rd_nr) {
nr = rd_nr;
- range = rd_nr << part_shift;
+ range = rd_nr;
} else {
nr = CONFIG_BLK_DEV_RAM_COUNT;
- range = 1UL << MINORBITS;
+ range = 1UL << (MINORBITS - part_shift);
}
if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
unsigned long range;
struct brd_device *brd, *next;
- range = rd_nr ? rd_nr << part_shift : 1UL << MINORBITS;
+ range = rd_nr ? rd_nr : 1UL << (MINORBITS - part_shift);
list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
brd_del_one(brd);
struct kobject *kobj;
mutex_lock(&loop_devices_mutex);
- lo = loop_init_one(MINOR(dev) >> part_shift);
+ lo = loop_init_one(dev & MINORMASK);
kobj = lo ? get_disk(lo->lo_disk) : ERR_PTR(-ENOMEM);
mutex_unlock(&loop_devices_mutex);
if (max_part > 0)
part_shift = fls(max_part);
- if ((1UL << part_shift) > DISK_MAX_PARTS)
- return -EINVAL;
-
if (max_loop > 1UL << (MINORBITS - part_shift))
return -EINVAL;
if (max_loop) {
nr = max_loop;
- range = max_loop << part_shift;
+ range = max_loop;
} else {
nr = 8;
- range = 1UL << MINORBITS;
+ range = 1UL << (MINORBITS - part_shift);
}
if (register_blkdev(LOOP_MAJOR, "loop"))
unsigned long range;
struct loop_device *lo, *next;
- range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
+ range = max_loop ? max_loop : 1UL << (MINORBITS - part_shift);
list_for_each_entry_safe(lo, next, &loop_devices, lo_list)
loop_del_one(lo);
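The minor-number changes in the brd and loop hunks above hinge on how part_shift splits a minor: the low fls(max_part) bits select the partition and the remaining bits select the device. A sketch with illustrative values:

#include <stdio.h>

/* fls(): 1-based index of the highest set bit; 0 when x == 0
 * (GCC-style builtin used for brevity). */
static int fls_(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int max_part = 15;		/* illustrative module parameter */
	int part_shift = fls_(max_part);	/* 4 low bits for partitions */
	unsigned int minor = 0x35;

	printf("device %u, partition %u\n",
	       minor >> part_shift,		/* MINOR(dev) >> part_shift */
	       minor & ((1u << part_shift) - 1));
	return 0;
}

With the hunks applied, the device index is instead taken from the full minor (dev & MINORMASK) and the per-device minor range shrinks accordingly.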
if (max_part > 0)
part_shift = fls(max_part);
- if ((1UL << part_shift) > DISK_MAX_PARTS)
- return -EINVAL;
-
- if (nbds_max > 1UL << (MINORBITS - part_shift))
- return -EINVAL;
-
for (i = 0; i < nbds_max; i++) {
struct gendisk *disk = alloc_disk(1 << part_shift);
if (!disk)
"movl %%edi,20(%%rax)\n\t"
"popq %%rdx\n\t"
"movl %%edx,0(%%rax)\n\t"
- "pushfq\n\t"
- "popq %%rax\n\t"
+ "lahf\n\t"
+ "shrl $8,%%eax\n\t"
"andl $1,%%eax\n"
:"=a"(rc)
: "a"(regs)
unlock_policy_rwsem_write(cpu);
- cpufreq_debug_enable_ratelimit();
-
-#ifdef CONFIG_HOTPLUG_CPU
- /* when the CPU which is the parent of the kobj is hotplugged
- * offline, check for siblings, and create cpufreq sysfs interface
- * and symlinks
- */
- if (unlikely(cpumask_weight(data->cpus) > 1)) {
- /* first sibling now owns the new sysfs dir */
- cpumask_clear_cpu(cpu, data->cpus);
- cpufreq_add_dev(get_cpu_sysdev(cpumask_first(data->cpus)));
-
- /* finally remove our own symlink */
- lock_policy_rwsem_write(cpu);
- __cpufreq_remove_dev(sys_dev);
- }
-#endif
-
free_cpumask_var(data->related_cpus);
free_cpumask_var(data->cpus);
kfree(data);
+ per_cpu(cpufreq_cpu_data, cpu) = NULL;
+ cpufreq_debug_enable_ratelimit();
return 0;
}
return -1;
}
-/* should be called late in the CPU removal sequence so that the stats
- * memory is still available in case someone tries to use it.
- */
static void cpufreq_stats_free_table(unsigned int cpu)
{
struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ if (policy && policy->cpu == cpu)
+ sysfs_remove_group(&policy->kobj, &stats_attr_group);
if (stat) {
kfree(stat->time_in_state);
kfree(stat);
}
per_cpu(cpufreq_stats_table, cpu) = NULL;
-}
-
-/* must be called early in the CPU removal sequence (before
- * cpufreq_remove_dev) so that policy is still valid.
- */
-static void cpufreq_stats_free_sysfs(unsigned int cpu)
-{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- if (policy && policy->cpu == cpu)
- sysfs_remove_group(&policy->kobj, &stats_attr_group);
if (policy)
cpufreq_cpu_put(policy);
}
case CPU_ONLINE_FROZEN:
cpufreq_update_policy(cpu);
break;
- case CPU_DOWN_PREPARE:
- cpufreq_stats_free_sysfs(cpu);
- break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
cpufreq_stats_free_table(cpu);
return NOTIFY_OK;
}
-/* priority=1 so this will get called before cpufreq_remove_dev */
static struct notifier_block cpufreq_stat_cpu_notifier __refdata =
{
.notifier_call = cpufreq_stat_cpu_callback,
- .priority = 1,
};
static struct notifier_block notifier_policy_block = {
unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
for_each_online_cpu(cpu) {
cpufreq_stats_free_table(cpu);
- cpufreq_stats_free_sysfs(cpu);
}
}
int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
int i;
int multiplier;
- struct timespec t;
if (data->needs_update) {
menu_update(dev);
return 0;
/* determine the expected residency time, round up */
- t = ktime_to_timespec(tick_nohz_get_sleep_length());
data->expected_us =
- t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;
+ DIV_ROUND_UP((u32)ktime_to_ns(tick_nohz_get_sleep_length()), 1000);
data->bucket = which_bucket(data->expected_us);
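DIV_ROUND_UP here rounds the nanosecond sleep length up to whole microseconds. A minimal equivalent of the macro (the sample value is illustrative):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t sleep_ns = 1500;	/* illustrative sleep length */
	unsigned int expected_us = (unsigned int)DIV_ROUND_UP(sleep_ns, 1000);
	printf("%u\n", expected_us);	/* 2: 1500 ns rounds up to 2 us */
	return 0;
}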
DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
},
},
- {
- .callback = intel_no_lvds_dmi_callback,
- .ident = "Asus EeeBox PC EB1007",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
- },
- },
{ } /* terminating entry */
};
dma_bits = rdev->need_dma32 ? 32 : 40;
r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
if (r) {
- rdev->need_dma32 = true;
printk(KERN_WARNING "radeon: No suitable DMA available.\n");
}
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- struct request_queue *q;
struct queue_limits *limits = data;
struct block_device *bdev = dev->bdev;
sector_t dev_size =
limits->logical_block_size >> SECTOR_SHIFT;
char b[BDEVNAME_SIZE];
- /*
- * Some devices exist without request functions,
- * such as loop devices not yet bound to backing files.
- * Forbid the use of such devices.
- */
- q = bdev_get_queue(bdev);
- if (!q || !q->make_request_fn) {
- DMWARN("%s: %s is not yet initialised: "
- "start=%llu, len=%llu, dev_size=%llu",
- dm_device_name(ti->table->md), bdevname(bdev, b),
- (unsigned long long)start,
- (unsigned long long)len,
- (unsigned long long)dev_size);
- return 1;
- }
-
if (!dev_size)
return 0;
if (rdev->raid_disk == -1)
return -EEXIST;
/* personality does all needed checks */
- if (rdev->mddev->pers->hot_remove_disk == NULL)
+ if (rdev->mddev->pers->hot_add_disk == NULL)
return -EINVAL;
err = rdev->mddev->pers->
hot_remove_disk(rdev->mddev, rdev->raid_disk);
static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
{
- bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
+ bio->bi_phys_segments = raid5_bi_phys_segments(bio) || (cnt << 16);
}
/* Find first data disk in a raid6 stripe */
bi = &sh->dev[i].req;
bi->bi_rw = rw;
- if (rw & WRITE)
+ if (rw == WRITE)
bi->bi_end_io = raid5_end_write_request;
else
bi->bi_end_io = raid5_end_read_request;
bi->bi_io_vec[0].bv_offset = 0;
bi->bi_size = STRIPE_SIZE;
bi->bi_next = NULL;
- if ((rw & WRITE) &&
+ if (rw == WRITE &&
test_bit(R5_ReWrite, &sh->dev[i].flags))
atomic_add(STRIPE_SECTORS,
&rdev->corrected_errors);
generic_make_request(bi);
} else {
- if (rw & WRITE)
+ if (rw == WRITE)
set_bit(STRIPE_DEGRADED, &sh->state);
pr_debug("skip op %ld on disc %d for sector %llu\n",
bi->bi_rw, i, (unsigned long long)sh->sector);
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
- ops->retlen = ops->oobretlen = 0;
+ ops->retlen = 0;
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
devops.len = subdev->size - to;
err = subdev->write_oob(subdev, to, &devops);
- ops->retlen += devops.oobretlen;
+ ops->retlen += devops.retlen;
if (err)
return err;
*/
}
-/* Program PCIE MaxPayload setting on device: ensure parent maxpayload <= device */
-static int pci_set_payload(struct pci_dev *dev)
-{
- int pos, ppos;
- u16 pctl, psz;
- u16 dctl, dsz, dcap, dmax;
- struct pci_dev *parent;
-
- parent = dev->bus->self;
- pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
- if (!pos)
- return 0;
-
- /* Read Device MaxPayload capability and setting */
- pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &dctl);
- pci_read_config_word(dev, pos + PCI_EXP_DEVCAP, &dcap);
- dsz = (dctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
- dmax = (dcap & PCI_EXP_DEVCAP_PAYLOAD);
-
- /* Read Parent MaxPayload setting */
- ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
- if (!ppos)
- return 0;
- pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl);
- psz = (pctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
-
- /* If parent payload > device max payload -> error
- * If parent payload > device payload -> set speed
- * If parent payload <= device payload -> do nothing
- */
- if (psz > dmax)
- return -1;
- else if (psz > dsz) {
- dev_info(&dev->dev, "Setting MaxPayload to %d\n", 128 << psz);
- pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
- (dctl & ~PCI_EXP_DEVCTL_PAYLOAD) +
- (psz << 5));
- }
- return 0;
-}
-
void pci_configure_slot(struct pci_dev *dev)
{
struct pci_dev *cdev;
(dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
return;
- ret = pci_set_payload(dev);
- if (ret)
- dev_warn(&dev->dev, "could not set device max payload\n");
-
memset(&hpp, 0, sizeof(hpp));
ret = pci_get_hp_params(dev, &hpp);
if (ret)
continue; /* Wrong type */
if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
return r; /* Exact match */
- /* We can't insert a non-prefetch resource inside a prefetchable parent .. */
- if (r->flags & IORESOURCE_PREFETCH)
- continue;
- /* .. but we can put a prefetchable resource inside a non-prefetchable one */
- if (!best)
- best = r;
+ if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
+ best = r; /* Approximating prefetchable by non-prefetchable */
}
return best;
}
#endif /* CONFIG_PCI_MSI */
-static void __devinit fixup_ti816x_class(struct pci_dev* dev)
-{
- /* TI 816x devices do not have class code set when in PCIe boot mode */
- if (dev->class == PCI_CLASS_NOT_DEFINED) {
- dev_info(&dev->dev, "Setting PCI class for 816x PCIe device\n");
- dev->class = PCI_CLASS_MULTIMEDIA_VIDEO;
- }
-}
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_TI, 0xb800, fixup_ti816x_class);
-
static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
struct pci_fixup *end)
{
kfree(sdev);
goto out;
}
- blk_get_queue(sdev->request_queue);
+
sdev->request_queue->queuedata = sdev;
scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
sdev = scsi_alloc_sdev(starget, 0, NULL);
if (!sdev)
return 0;
- if (scsi_device_get(sdev)) {
- __scsi_remove_device(sdev);
+ if (scsi_device_get(sdev))
return 0;
- }
}
sprintf(devname, "host %d channel %d id %d",
goto out;
sdev = scsi_alloc_sdev(starget, 0, NULL);
- if (sdev)
+ if (sdev) {
+ sdev->sdev_gendev.parent = get_device(&starget->dev);
sdev->borken = 0;
- else
+ } else
scsi_target_reap(starget);
put_device(&starget->dev);
out:
kfree(evt);
}
- blk_put_queue(sdev->request_queue);
/* NULL queue means the device can't be used */
sdev->request_queue = NULL;
struct request_queue *rq = sdev->request_queue;
struct scsi_target *starget = sdev->sdev_target;
- error = scsi_device_set_state(sdev, SDEV_RUNNING);
- if (error)
+ if ((error = scsi_device_set_state(sdev, SDEV_RUNNING)) != 0)
return error;
error = scsi_target_add(starget);
error = device_add(&sdev->sdev_gendev);
if (error) {
printk(KERN_INFO "error 1\n");
- return error;
+ goto out_remove;
}
error = device_add(&sdev->sdev_dev);
if (error) {
printk(KERN_INFO "error 2\n");
device_del(&sdev->sdev_gendev);
- return error;
+ goto out_remove;
}
transport_add_device(&sdev->sdev_gendev);
sdev->is_visible = 1;
else
error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth);
if (error)
- return error;
+ goto out_remove;
if (sdev->host->hostt->change_queue_type)
error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw);
else
error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type);
if (error)
- return error;
+ goto out_remove;
error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);
error = device_create_file(&sdev->sdev_gendev,
sdev->host->hostt->sdev_attrs[i]);
if (error)
- return error;
+ goto out_remove;
}
}
+ return 0;
+
+ out_remove:
+ __scsi_remove_device(sdev);
return error;
+
}
void __scsi_remove_device(struct scsi_device *sdev)
"0: bsfw %1,%w0\n\t"
"btr %0,%1\n\t"
"jnc 0b"
- : "=&r" (rv), "+m" (*field) :);
+ : "=&r" (rv), "=m" (*field) :);
return rv;
}
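The bsfw/btr sequence above finds the lowest set bit in the 16-bit field, clears it, and retries (jnc) if the bit read back as zero, e.g. because it was cleared concurrently. A non-atomic C sketch of the single-threaded case:

#include <stdint.h>
#include <stdio.h>

/* Find the lowest set bit of *field, clear it, return its index;
 * -1 when empty (the asm instead loops until a bit appears). */
static int find_and_clear_bit16(uint16_t *field)
{
	int rv;

	if (*field == 0)
		return -1;
	rv = __builtin_ctz(*field);		/* bsfw: lowest set bit */
	*field &= (uint16_t)~(1u << rv);	/* btr: clear that bit */
	return rv;
}

int main(void)
{
	uint16_t field = 0x0028;	/* bits 3 and 5 set */
	printf("%d\n", find_and_clear_bit16(&field));	/* 3 */
	printf("%#x\n", (unsigned int)field);		/* 0x20 */
	return 0;
}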
be32_to_cpus(&pdu->status);
be32_to_cpus(&pdu->actual_length);
be32_to_cpus(&pdu->start_frame);
- be32_to_cpus(&pdu->number_of_packets);
+ cpu_to_be32s(&pdu->number_of_packets);
be32_to_cpus(&pdu->error_count);
}
}
{ NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
{ NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
{ NOKIA_PCSUITE_ACM_INFO(0x0302), }, /* Nokia N8 */
- { NOKIA_PCSUITE_ACM_INFO(0x0335), }, /* Nokia E7 */
- { NOKIA_PCSUITE_ACM_INFO(0x03cd), }, /* Nokia C7 */
{ SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
/* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
{
int i, status = -ETIMEDOUT;
- for (i = 0; i < USB_STS_RETRIES &&
- (status == -ETIMEDOUT || status == -EPIPE); i++) {
+ for (i = 0; i < USB_STS_RETRIES && status == -ETIMEDOUT; i++) {
status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_HUB, 0, 0,
data, sizeof(*data), USB_STS_TIMEOUT);
{
int i, status = -ETIMEDOUT;
- for (i = 0; i < USB_STS_RETRIES &&
- (status == -ETIMEDOUT || status == -EPIPE); i++) {
+ for (i = 0; i < USB_STS_RETRIES && status == -ETIMEDOUT; i++) {
status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_PORT, 0, port1,
data, sizeof(*data), USB_STS_TIMEOUT);
}
/* newer chips have more FIFO memory than rm9200 */
- if (cpu_is_at91sam9260() || cpu_is_at91sam9g20()) {
+ if (cpu_is_at91sam9260()) {
udc->ep[0].maxpacket = 64;
udc->ep[3].maxpacket = 64;
udc->ep[4].maxpacket = 512;
*/
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
| USB_CDC_SEND_ENCAPSULATED_COMMAND:
- if (w_value || w_index != rndis->ctrl_id)
+ if (w_length > req->length || w_value
+ || w_index != rndis->ctrl_id)
goto invalid;
/* read the request; process it later */
value = w_length;
*/
static int ohci_quirk_nvidia_shutdown(struct usb_hcd *hcd)
{
- struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- /* Evidently nVidia fixed their later hardware; this is a guess at
- * the changeover point.
- */
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB 0x026d
-
- if (pdev->device < PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB) {
- ohci->flags |= OHCI_QUIRK_SHUTDOWN;
- ohci_dbg(ohci, "enabled nVidia shutdown quirk\n");
- }
+ ohci->flags |= OHCI_QUIRK_SHUTDOWN;
+ ohci_dbg(ohci, "enabled nVidia shutdown quirk\n");
return 0;
}
interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
if (interval != ep->desc.bInterval - 1)
dev_warn(&udev->dev,
- "ep %#x - rounding interval to %d %sframes\n",
+ "ep %#x - rounding interval to %d microframes\n",
ep->desc.bEndpointAddress,
- 1 << interval,
- udev->speed == USB_SPEED_FULL ? "" : "micro");
-
- if (udev->speed == USB_SPEED_FULL) {
- /*
- * Full speed isoc endpoints specify interval in frames,
- * not microframes. We are using microframes everywhere,
- * so adjust accordingly.
- */
- interval += 3; /* 1 frame = 2^3 uframes */
- }
+ 1 << interval);
return interval;
}
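For reference, a sketch of the exponent decoding above: bInterval in 1..16 encodes an interval of 2^(bInterval - 1) units, so the value is clamped, decremented, and a warning is issued when rounding changed it (values and names here are illustrative):

#include <stdio.h>

static unsigned int parse_exponent_interval(unsigned int bInterval)
{
	unsigned int clamped = bInterval;

	if (clamped < 1)
		clamped = 1;		/* clamp_val(bInterval, 1, 16) */
	else if (clamped > 16)
		clamped = 16;
	if (clamped != bInterval)
		fprintf(stderr, "rounding interval to %u\n",
			1u << (clamped - 1));
	return clamped - 1;		/* stored as an exponent */
}

int main(void)
{
	printf("%u\n", 1u << parse_exponent_interval(4));	/* 8 */
	printf("%u\n", 1u << parse_exponent_interval(20));	/* 32768 */
	return 0;
}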
break;
case USB_SPEED_FULL:
- if (usb_endpoint_xfer_isoc(&ep->desc)) {
+ if (usb_endpoint_xfer_int(&ep->desc)) {
interval = xhci_parse_exponent_interval(udev, ep);
break;
}
/*
- * Fall through for interrupt endpoint interval decoding
+ * Fall through for isochronous endpoint interval decoding
* since it uses the same rules as low speed interrupt
* endpoints.
*/
{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
- { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
- { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
- { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
- { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
{ USB_DEVICE(FTDI_VID, FTDI_IBS_APP70_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) },
- { USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) },
/*
* ELV devices:
*/
{ USB_DEVICE(FTDI_VID, EVER_ECO_PRO_CDS) },
{ USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_2_PID) },
- { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_3_PID) },
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_0_PID) },
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_1_PID) },
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_2_PID) },
*/
#define FTDI_4N_GALAXY_DE_1_PID 0xF3C0
#define FTDI_4N_GALAXY_DE_2_PID 0xF3C1
-#define FTDI_4N_GALAXY_DE_3_PID 0xF3C2
/*
* Linx Technologies product ids
/* www.canusb.com Lawicel CANUSB device (FTDI_VID) */
#define FTDI_CANUSB_PID 0xFFA8 /* Product Id */
-/*
- * TavIR AVR product ids (FTDI_VID)
- */
-#define FTDI_TAVIR_STK500_PID 0xFA33 /* STK500 AVR programmer */
-
/********************************/
/*
* Garmin GPS driver
*
- * Copyright (C) 2006-2011 Hermann Kneissel herkne@gmx.de
+ * Copyright (C) 2006-2009 Hermann Kneissel herkne@users.sourceforge.net
*
* The latest version of the driver can be found at
* http://sourceforge.net/projects/garmin-gps/
*/
#define VERSION_MAJOR 0
-#define VERSION_MINOR 36
+#define VERSION_MINOR 33
#define _STR(s) #s
#define _DRIVER_VERSION(a, b) "v" _STR(a) "." _STR(b)
*/
static int gsp_rec_packet(struct garmin_data *garmin_data_p, int count)
{
- unsigned long flags;
const __u8 *recpkt = garmin_data_p->inbuffer+GSP_INITIAL_OFFSET;
__le32 *usbdata = (__le32 *) garmin_data_p->inbuffer;
/* if this was an abort-transfer command, flush all
queued data. */
if (isAbortTrfCmnd(garmin_data_p->inbuffer)) {
- spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags |= FLAGS_DROP_DATA;
- spin_unlock_irqrestore(&garmin_data_p->lock, flags);
pkt_clear(garmin_data_p);
}
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->mode = initial_mode;
garmin_data_p->count = 0;
- garmin_data_p->flags &= FLAGS_SESSION_REPLY1_SEEN;
+ garmin_data_p->flags = 0;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
/* shutdown any bulk reads that might be going on */
static void garmin_read_process(struct garmin_data *garmin_data_p,
- unsigned char *data, unsigned data_length,
- int bulk_data)
+ unsigned char *data, unsigned data_length)
{
unsigned long flags;
send it directly to the tty port */
if (garmin_data_p->flags & FLAGS_QUEUING) {
pkt_add(garmin_data_p, data, data_length);
- } else if (bulk_data ||
- getLayerId(data) == GARMIN_LAYERID_APPL) {
+ } else if (getLayerId(data) == GARMIN_LAYERID_APPL) {
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags |= APP_RESP_SEEN;
usb_serial_debug_data(debug, &port->dev,
__func__, urb->actual_length, data);
- garmin_read_process(garmin_data_p, data, urb->actual_length, 1);
+ garmin_read_process(garmin_data_p, data, urb->actual_length);
if (urb->actual_length == 0 &&
0 != (garmin_data_p->flags & FLAGS_BULK_IN_RESTART)) {
__func__, garmin_data_p->serial_num);
}
- garmin_read_process(garmin_data_p, data, urb->actual_length, 0);
+ garmin_read_process(garmin_data_p, data, urb->actual_length);
port->interrupt_in_urb->dev = port->serial->dev;
retval = usb_submit_urb(urb, GFP_ATOMIC);
garmin_data_p->timer.function = timeout_handler;
garmin_data_p->port = port;
garmin_data_p->state = 0;
- garmin_data_p->flags = 0;
garmin_data_p->count = 0;
usb_set_serial_port_data(port, garmin_data_p);
{ USB_DEVICE(0x05c6, 0x3197) }, /* unknown Motorola phone */
{ USB_DEVICE(0x0c44, 0x0022) }, /* unknown Motorola phone */
{ USB_DEVICE(0x22b8, 0x2a64) }, /* Motorola KRZR K1m */
- { USB_DEVICE(0x22b8, 0x2c84) }, /* Motorola VE240 phone */
{ USB_DEVICE(0x22b8, 0x2c64) }, /* Motorola V950 phone */
{ },
};
if (irq < 0)
return irq;
- irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME;
+ irqflags |= IRQF_NO_SUSPEND;
retval = request_irq(irq, handler, irqflags, devname, dev_id);
if (retval != 0) {
unbind_from_irq(irq);
evtchn_to_irq[evtchn] = irq;
irq_info[irq] = mk_virq_info(evtchn, virq);
bind_evtchn_to_cpu(evtchn, cpu);
+
+ /* Ready for use. */
+ unmask_evtchn(evtchn);
}
}
evtchn_to_irq[evtchn] = irq;
irq_info[irq] = mk_ipi_info(evtchn, ipi);
bind_evtchn_to_cpu(evtchn, cpu);
+
+ /* Ready for use. */
+ unmask_evtchn(evtchn);
+
}
}
if (!bdev->bd_part)
goto out_clear;
- ret = 0;
if (disk->fops->open) {
ret = disk->fops->open(bdev, mode);
if (ret == -ERESTARTSYS) {
mutex_unlock(&bdev->bd_mutex);
goto restart;
}
+ if (ret)
+ goto out_clear;
}
- /*
- * If the device is invalidated, rescan partition
- * if open succeeded or failed with -ENOMEDIUM.
- * The latter is necessary to prevent ghost
- * partitions on a removed medium.
- */
- if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
- rescan_partitions(disk, bdev);
- if (ret)
- goto out_clear;
-
if (!bdev->bd_openers) {
bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
bdi = blk_get_backing_dev_info(bdev);
bdi = &default_backing_dev_info;
bdev->bd_inode->i_data.backing_dev_info = bdi;
}
+ if (bdev->bd_invalidated)
+ rescan_partitions(disk, bdev);
} else {
struct block_device *whole;
whole = bdget_disk(disk, 0);
put_disk(disk);
disk = NULL;
if (bdev->bd_contains == bdev) {
- ret = 0;
- if (bdev->bd_disk->fops->open)
+ if (bdev->bd_disk->fops->open) {
ret = bdev->bd_disk->fops->open(bdev, mode);
- /* the same as first opener case, read comment there */
- if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
+ if (ret)
+ goto out_unlock_bdev;
+ }
+ if (bdev->bd_invalidated)
rescan_partitions(bdev->bd_disk, bdev);
- if (ret)
- goto out_unlock_bdev;
}
}
bdev->bd_openers++;
/* FIXME: (deleted) ? */
path = d_path(&dcs->path, kbuf, PAGE_SIZE);
- mutex_unlock(&dcookie_mutex);
-
if (IS_ERR(path)) {
err = PTR_ERR(path);
goto out_free;
out_free:
kfree(kbuf);
- return err;
out:
mutex_unlock(&dcookie_mutex);
return err;
struct mutex *tfm_mutex;
char *block_aligned_filename;
struct ecryptfs_auth_tok *auth_tok;
- struct scatterlist src_sg[2];
- struct scatterlist dst_sg[2];
+ struct scatterlist src_sg;
+ struct scatterlist dst_sg;
struct blkcipher_desc desc;
char iv[ECRYPTFS_MAX_IV_BYTES];
char hash[ECRYPTFS_TAG_70_DIGEST_SIZE];
memcpy(&s->block_aligned_filename[s->num_rand_bytes], filename,
filename_size);
rc = virt_to_scatterlist(s->block_aligned_filename,
- s->block_aligned_filename_size, s->src_sg, 2);
- if (rc < 1) {
+ s->block_aligned_filename_size, &s->src_sg, 1);
+ if (rc != 1) {
printk(KERN_ERR "%s: Internal error whilst attempting to "
- "convert filename memory to scatterlist; rc = [%d]. "
+ "convert filename memory to scatterlist; "
+ "expected rc = 1; got rc = [%d]. "
"block_aligned_filename_size = [%zd]\n", __func__, rc,
s->block_aligned_filename_size);
goto out_release_free_unlock;
}
rc = virt_to_scatterlist(&dest[s->i], s->block_aligned_filename_size,
- s->dst_sg, 2);
- if (rc < 1) {
+ &s->dst_sg, 1);
+ if (rc != 1) {
printk(KERN_ERR "%s: Internal error whilst attempting to "
"convert encrypted filename memory to scatterlist; "
- "rc = [%d]. block_aligned_filename_size = [%zd]\n",
- __func__, rc, s->block_aligned_filename_size);
+ "expected rc = 1; got rc = [%d]. "
+ "block_aligned_filename_size = [%zd]\n", __func__, rc,
+ s->block_aligned_filename_size);
goto out_release_free_unlock;
}
/* The characters in the first block effectively do the job
mount_crypt_stat->global_default_fn_cipher_key_bytes);
goto out_release_free_unlock;
}
- rc = crypto_blkcipher_encrypt_iv(&s->desc, s->dst_sg, s->src_sg,
+ rc = crypto_blkcipher_encrypt_iv(&s->desc, &s->dst_sg, &s->src_sg,
s->block_aligned_filename_size);
if (rc) {
printk(KERN_ERR "%s: Error attempting to encrypt filename; "
struct mutex *tfm_mutex;
char *decrypted_filename;
struct ecryptfs_auth_tok *auth_tok;
- struct scatterlist src_sg[2];
- struct scatterlist dst_sg[2];
+ struct scatterlist src_sg;
+ struct scatterlist dst_sg;
struct blkcipher_desc desc;
char fnek_sig_hex[ECRYPTFS_SIG_SIZE_HEX + 1];
char iv[ECRYPTFS_MAX_IV_BYTES];
}
mutex_lock(s->tfm_mutex);
rc = virt_to_scatterlist(&data[(*packet_size)],
- s->block_aligned_filename_size, s->src_sg, 2);
- if (rc < 1) {
+ s->block_aligned_filename_size, &s->src_sg, 1);
+ if (rc != 1) {
printk(KERN_ERR "%s: Internal error whilst attempting to "
"convert encrypted filename memory to scatterlist; "
- "rc = [%d]. block_aligned_filename_size = [%zd]\n",
- __func__, rc, s->block_aligned_filename_size);
+ "expected rc = 1; got rc = [%d]. "
+ "block_aligned_filename_size = [%zd]\n", __func__, rc,
+ s->block_aligned_filename_size);
goto out_unlock;
}
(*packet_size) += s->block_aligned_filename_size;
goto out_unlock;
}
rc = virt_to_scatterlist(s->decrypted_filename,
- s->block_aligned_filename_size, s->dst_sg, 2);
- if (rc < 1) {
+ s->block_aligned_filename_size, &s->dst_sg, 1);
+ if (rc != 1) {
printk(KERN_ERR "%s: Internal error whilst attempting to "
"convert decrypted filename memory to scatterlist; "
- "rc = [%d]. block_aligned_filename_size = [%zd]\n",
- __func__, rc, s->block_aligned_filename_size);
+ "expected rc = 1; got rc = [%d]. "
+ "block_aligned_filename_size = [%zd]\n", __func__, rc,
+ s->block_aligned_filename_size);
goto out_free_unlock;
}
/* The characters in the first block effectively do the job of
mount_crypt_stat->global_default_fn_cipher_key_bytes);
goto out_free_unlock;
}
- rc = crypto_blkcipher_decrypt_iv(&s->desc, s->dst_sg, s->src_sg,
+ rc = crypto_blkcipher_decrypt_iv(&s->desc, &s->dst_sg, &s->src_sg,
s->block_aligned_filename_size);
if (rc) {
printk(KERN_ERR "%s: Error attempting to decrypt filename; "
bprm->mm = NULL; /* We're using it now */
- set_fs(USER_DS);
current->flags &= ~PF_RANDOMIZE;
flush_thread();
current->personality &= ~bprm->per_clear;
if (retval)
return retval;
+ /* kernel module loader fixup */
+ /* so we don't try to run modprobe in kernel space. */
+ set_fs(USER_DS);
+
retval = audit_bprm(bprm);
if (retval)
return retval;
frame->at = entries;
frame->bh = bh;
bh = bh2;
- /*
- * Mark buffers dirty here so that if do_split() fails we write a
- * consistent set of buffers to disk.
- */
- ext3_journal_dirty_metadata(handle, frame->bh);
- ext3_journal_dirty_metadata(handle, bh);
de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
- if (!de) {
- ext3_mark_inode_dirty(handle, dir);
- dx_release(frames);
+ dx_release (frames);
+ if (!(de))
return retval;
- }
- dx_release(frames);
return add_dirent_to_buf(handle, dentry, inode, de, bh);
}
return 0;
err:
- if (page)
- page_cache_release(page);
if (e4b->bd_bitmap_page)
page_cache_release(e4b->bd_bitmap_page);
if (e4b->bd_buddy_page)
if (attr & ATTR_SYS)
inode->i_flags |= S_IMMUTABLE;
else
- inode->i_flags &= ~S_IMMUTABLE;
+ inode->i_flags &= S_IMMUTABLE;
}
fat_save_attrs(inode, attr);
required. */
JBUFFER_TRACE(jh, "file as BJ_Forget");
journal_file_buffer(jh, commit_transaction, BJ_Forget);
- /*
- * Wake up any transactions which were waiting for this
- * IO to complete. The barrier must be here so that changes
- * by journal_file_buffer() take effect before wake_up_bit()
- * does the waitqueue check.
- */
- smp_mb();
+ /* Wake up any transactions which were waiting for this
+ IO to complete */
wake_up_bit(&bh->b_state, BH_Unshadow);
JBUFFER_TRACE(jh, "brelse shadowed buffer");
__brelse(bh);
int __log_start_commit(journal_t *journal, tid_t target)
{
/*
- * The only transaction we can possibly wait upon is the
- * currently running transaction (if it exists). Otherwise,
- * the target tid must be an old one.
+ * Are we already doing a recent enough commit?
*/
- if (journal->j_running_transaction &&
- journal->j_running_transaction->t_tid == target) {
+ if (!tid_geq(journal->j_commit_request, target)) {
/*
* We want a new commit: OK, mark the request and wake up the
* commit thread. We do _not_ do the commit ourselves.
journal->j_commit_sequence);
wake_up(&journal->j_wait_commit);
return 1;
- } else if (!tid_geq(journal->j_commit_request, target))
- /* This should never happen, but if it does, preserve
- the evidence before kjournald goes into a loop and
- increments j_commit_sequence beyond all recognition. */
- WARN_ONCE(1, "jbd: bad log_start_commit: %u %u %u %u\n",
- journal->j_commit_request, journal->j_commit_sequence,
- target, journal->j_running_transaction ?
- journal->j_running_transaction->t_tid : 0);
+ }
return 0;
}
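The tid_geq() test above compares transaction ids by signed difference, so it stays correct when the u32 counter wraps; a standalone sketch mirroring jbd's helper:

#include <stdio.h>

typedef unsigned int tid_t;

/* True when x is at or after y, modulo 2^32 (wrap-safe). */
static int tid_geq(tid_t x, tid_t y)
{
	int difference = (int)(x - y);
	return difference >= 0;
}

int main(void)
{
	printf("%d\n", tid_geq(5, 3));			/* 1 */
	printf("%d\n", tid_geq(2, 0xFFFFFFF0u));	/* 1: across the wrap */
	return 0;
}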
list_add_tail (&f->list, frags);
found:
- if (rec >= f->num) {
- ldm_error("REC value (%d) exceeds NUM value (%d)", rec, f->num);
- return false;
- }
-
if (f->map & (1 << rec)) {
ldm_error ("Duplicate VBLK, part %d.", rec);
f->map &= 0x7F; /* Mark the group as broken */
out_release:
release_head(c, BASEHD);
- kfree(dent);
out_ro:
ubifs_ro_mode(c, err);
if (last_reference)
* @c: UBIFS file-system description object
*
* This function returns a pointer to the superblock node or a negative error
- * code. Note, the user of this function is responsible for kfree()'ing the
- * returned superblock buffer.
+ * code.
*/
struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c)
{
long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt);
if (nr == 0)
- /*
- * Due to the way UBIFS updates the clean znode counter it may
- * temporarily be negative.
- */
- return clean_zn_cnt >= 0 ? clean_zn_cnt : 1;
+ return clean_zn_cnt;
if (!clean_zn_cnt) {
/*
}
sup->leb_cnt = cpu_to_le32(c->leb_cnt);
err = ubifs_write_sb_node(c, sup);
- kfree(sup);
if (err)
goto out;
}
}
void
-__xfs_inode_clear_reclaim(
+__xfs_inode_clear_reclaim_tag(
+ xfs_mount_t *mp,
xfs_perag_t *pag,
xfs_inode_t *ip)
{
+ radix_tree_tag_clear(&pag->pag_ici_root,
+ XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
pag->pag_ici_reclaimable--;
}
-void
-__xfs_inode_clear_reclaim_tag(
- xfs_mount_t *mp,
- xfs_perag_t *pag,
- xfs_inode_t *ip)
-{
- radix_tree_tag_clear(&pag->pag_ici_root,
- XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
- __xfs_inode_clear_reclaim(pag, ip);
-}
-
STATIC int
xfs_reclaim_inode(
struct xfs_inode *ip,
void xfs_inode_set_reclaim_tag(struct xfs_inode *ip);
void __xfs_inode_set_reclaim_tag(struct xfs_perag *pag, struct xfs_inode *ip);
-void __xfs_inode_clear_reclaim(struct xfs_perag *pag, struct xfs_inode *ip);
void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag,
struct xfs_inode *ip);
write_lock(&pag->pag_ici_lock);
if (!radix_tree_delete(&pag->pag_ici_root, agino))
ASSERT(0);
- __xfs_inode_clear_reclaim(pag, ip);
write_unlock(&pag->pag_ici_lock);
xfs_put_perag(mp, pag);
* Used by threaded interrupts which need to keep the
* irq line disabled until the threaded handler has been run.
* IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
- * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
+ *
*/
#define IRQF_DISABLED 0x00000020
#define IRQF_SAMPLE_RANDOM 0x00000040
#define IRQF_IRQPOLL 0x00001000
#define IRQF_ONESHOT 0x00002000
#define IRQF_NO_SUSPEND 0x00004000
-#define IRQF_FORCE_RESUME 0x00008000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND)
#define PCI_DEVICE_ID_MATROX_G550 0x2527
#define PCI_DEVICE_ID_MATROX_VIA 0x4536
-#define PCI_VENDOR_ID_MOBILITY_ELECTRONICS 0x14f2
-
#define PCI_VENDOR_ID_CT 0x102c
#define PCI_DEVICE_ID_CT_69000 0x00c0
#define PCI_DEVICE_ID_CT_65545 0x00d8
unsigned ret;
repeat:
- ret = ACCESS_ONCE(sl->sequence);
+ ret = sl->sequence;
+ smp_rmb();
if (unlikely(ret & 1)) {
cpu_relax();
goto repeat;
}
- smp_rmb();
return ret;
}
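A userspace sketch of the reader protocol above, with C11 atomics standing in for the kernel's barriers (an approximation, not the kernel implementation): spin while the sequence is odd (writer active), read the protected data, then retry if the sequence moved.

#include <stdatomic.h>

struct seqlock {
	atomic_uint sequence;	/* even: quiescent, odd: writer active */
};

static unsigned int read_seqbegin_sketch(struct seqlock *sl)
{
	unsigned int ret;

	/* acquire load approximates the smp_rmb() after the read */
	while ((ret = atomic_load_explicit(&sl->sequence,
					   memory_order_acquire)) & 1)
		;			/* writer in progress: retry */
	return ret;
}

static int read_seqretry_sketch(struct seqlock *sl, unsigned int start)
{
	/* fence approximates smp_rmb(): data reads stay before the reload */
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&sl->sequence,
				    memory_order_relaxed) != start;
}

A caller loops: seq = read_seqbegin_sketch(&sl); copy the data; repeat while read_seqretry_sketch(&sl, seq) is nonzero.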
void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
- if (resume) {
- if (!(desc->status & IRQ_SUSPENDED)) {
- if (!desc->action)
- return;
- if (!(desc->action->flags & IRQF_FORCE_RESUME))
- return;
- /* Pretend that it got disabled ! */
- desc->depth++;
- }
+ if (resume)
desc->status &= ~IRQ_SUSPENDED;
- }
switch (desc->depth) {
case 0:
for_each_irq_desc(irq, desc) {
unsigned long flags;
+ if (!(desc->status & IRQ_SUSPENDED))
+ continue;
+
spin_lock_irqsave(&desc->lock, flags);
__enable_irq(desc, irq, true);
spin_unlock_irqrestore(&desc->lock, flags);
int ret = 0;
if (unlikely(current->lockdep_recursion))
- return 1; /* avoid false negative lockdep_assert_held() */
+ return ret;
raw_local_irq_save(flags);
check_flags(flags);
cycle_t cycle_interval;
/* Number of clock shifted nano seconds in one NTP interval. */
u64 xtime_interval;
- /* shifted nano seconds left over when rounding cycle_interval */
- s64 xtime_remainder;
/* Raw nano seconds accumulated per NTP interval. */
u32 raw_interval;
static void timekeeper_setup_internals(struct clocksource *clock)
{
cycle_t interval;
- u64 tmp, ntpinterval;
+ u64 tmp;
timekeeper.clock = clock;
clock->cycle_last = clock->read(clock);
/* Do the ns -> cycle conversion first, using original mult */
tmp = NTP_INTERVAL_LENGTH;
tmp <<= clock->shift;
- ntpinterval = tmp;
tmp += clock->mult/2;
do_div(tmp, clock->mult);
if (tmp == 0)
/* Go back from cycles -> shifted ns */
timekeeper.xtime_interval = (u64) interval * clock->mult;
- timekeeper.xtime_remainder = ntpinterval - timekeeper.xtime_interval;
timekeeper.raw_interval =
((u64) interval * clock->mult) >> clock->shift;
/* accumulate error between NTP and clock interval */
timekeeper.ntp_error += tick_length;
- timekeeper.ntp_error -=
- (timekeeper.xtime_interval + timekeeper.xtime_remainder) <<
+ timekeeper.ntp_error -= timekeeper.xtime_interval <<
timekeeper.ntp_error_shift;
}
ftrace_match_records(parser->buffer, parser->idx, enable);
}
+ mutex_lock(&ftrace_lock);
+ if (ftrace_start_up && ftrace_enabled)
+ ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+ mutex_unlock(&ftrace_lock);
+
trace_parser_put(parser);
kfree(iter);
- if (file->f_mode & FMODE_WRITE) {
- mutex_lock(&ftrace_lock);
- if (ftrace_start_up && ftrace_enabled)
- ftrace_run_update_code(FTRACE_ENABLE_CALLS);
- mutex_unlock(&ftrace_lock);
- }
-
mutex_unlock(&ftrace_regex_lock);
return 0;
}
#define HARDIRQ_ENTER() \
local_irq_disable(); \
- __irq_enter(); \
+ irq_enter(); \
WARN_ON(!in_irq());
#define HARDIRQ_EXIT() \
*/
chg = vma_needs_reservation(h, vma, addr);
if (chg < 0)
- return ERR_PTR(-VM_FAULT_OOM);
+ return ERR_PTR(chg);
if (chg)
if (hugetlb_get_quota(inode->i_mapping, chg))
- return ERR_PTR(-VM_FAULT_SIGBUS);
+ return ERR_PTR(-ENOSPC);
spin_lock(&hugetlb_lock);
page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
++(*pos);
list_for_each_continue_rcu(n, &object_list) {
- struct kmemleak_object *obj =
- list_entry(n, struct kmemleak_object, object_list);
- if (get_object(obj)) {
- next_obj = obj;
+ next_obj = list_entry(n, struct kmemleak_object, object_list);
+ if (get_object(next_obj))
break;
- }
}
put_object(prev_obj);
*/
alloc_flags = gfp_to_alloc_flags(gfp_mask);
-rebalance:
/* This is the last chance, in general, before the goto nopage. */
page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
if (page)
goto got_pg;
+rebalance:
/* Allocate without watermarks if the context allows */
if (alloc_flags & ALLOC_NO_WATERMARKS) {
page = __alloc_pages_high_priority(gfp_mask, order,
return pos - buf;
}
-static ssize_t show_atmindex(struct device *cdev,
- struct device_attribute *attr, char *buf)
-{
- struct atm_dev *adev = to_atm_dev(cdev);
-
- return sprintf(buf, "%d\n", adev->number);
-}
-
static ssize_t show_carrier(struct device *cdev,
struct device_attribute *attr, char *buf)
{
static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
static DEVICE_ATTR(atmaddress, S_IRUGO, show_atmaddress, NULL);
-static DEVICE_ATTR(atmindex, S_IRUGO, show_atmindex, NULL);
static DEVICE_ATTR(carrier, S_IRUGO, show_carrier, NULL);
static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
static DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL);
static struct device_attribute *atm_attrs[] = {
&dev_attr_atmaddress,
&dev_attr_address,
- &dev_attr_atmindex,
&dev_attr_carrier,
&dev_attr_type,
&dev_attr_link_rate,
int tcphoff, needs_ack;
const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
struct ipv6hdr *ip6h;
-#define DEFAULT_TOS_VALUE 0x0U
- const __u8 tclass = DEFAULT_TOS_VALUE;
struct dst_entry *dst = NULL;
u8 proto;
struct flowi fl;
skb_put(nskb, sizeof(struct ipv6hdr));
skb_reset_network_header(nskb);
ip6h = ipv6_hdr(nskb);
- *(__be32 *)ip6h = htonl(0x60000000 | (tclass << 20));
+ ip6h->version = 6;
ip6h->hop_limit = dst_metric(dst, RTAX_HOPLIMIT);
ip6h->nexthdr = IPPROTO_TCP;
ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr);
/* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */
fp = skb_shinfo(head)->frag_list;
- if (fp && NFCT_FRAG6_CB(fp)->orig == NULL)
+ if (NFCT_FRAG6_CB(fp)->orig == NULL)
/* at above code, head skb is divided into two skbs. */
fp = fp->next;
hdr = ipv6_hdr(clone);
fhdr = (struct frag_hdr *)skb_transport_header(clone);
+ if (!(fhdr->frag_off & htons(0xFFF9))) {
+ pr_debug("Invalid fragment offset\n");
+ /* It is not a fragmented frame */
+ goto ret_orig;
+ }
+
if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh)
nf_ct_frag6_evictor();
u_int8_t orig, nv;
orig = ipv6_get_dsfield(iph);
- nv = (orig & ~info->tos_mask) ^ info->tos_value;
+ nv = (orig & info->tos_mask) ^ info->tos_value;
if (orig != nv) {
if (!skb_make_writable(skb, sizeof(struct iphdr)))
case TCP_CLOSE_WAIT:
/* The server initiated a shutdown of the socket */
xprt_force_disconnect(xprt);
+ case TCP_SYN_SENT:
xprt->connect_cookie++;
case TCP_CLOSING:
/*
static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
{
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
- int ret = -ENOTCONN;
if (!transport->inet) {
struct sock *sk = sock->sk;
}
if (!xprt_bound(xprt))
- goto out;
+ return -ENOTCONN;
/* Tell the socket layer to start connecting... */
xprt->stat.connect_count++;
xprt->stat.connect_start = jiffies;
- ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
- switch (ret) {
- case 0:
- case -EINPROGRESS:
- /* SYN_SENT! */
- xprt->connect_cookie++;
- if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
- xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
- }
-out:
- return ret;
+ return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
}
/**
i = 0;
if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) {
- request->ssids[i].ssid_len = nla_len(attr);
if (request->ssids[i].ssid_len > IEEE80211_MAX_SSID_LEN) {
err = -EINVAL;
goto out_free;
}
memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr));
+ request->ssids[i].ssid_len = nla_len(attr);
i++;
}
}
SND_PCI_QUIRK(0x1025, 0x015b, "Acer Aspire One",
ALC268_ACER_ASPIRE_ONE),
SND_PCI_QUIRK(0x1028, 0x0253, "Dell OEM", ALC268_DELL),
- SND_PCI_QUIRK(0x1028, 0x02b0, "Dell Inspiron 910", ALC268_AUTO),
SND_PCI_QUIRK_MASK(0x1028, 0xfff0, 0x02b0,
"Dell Inspiron Mini9/Vostro A90", ALC268_DELL),
/* almost compatible with toshiba but with optional digital outs;
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe,
"Dell Studio XPS 1645", STAC_DELL_M6_BOTH),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413,
- "Dell Studio 1558", STAC_DELL_M6_DMIC),
+ "Dell Studio 1558", STAC_DELL_M6_BOTH),
{} /* terminator */
};
static const struct snd_soc_dapm_route lineout1_diff_routes[] = {
{ "LINEOUT1 Mixer", "IN1L Switch", "IN1L PGA" },
{ "LINEOUT1 Mixer", "IN1R Switch", "IN1R PGA" },
- { "LINEOUT1 Mixer", "Output Switch", "Left Output PGA" },
+ { "LINEOUT1 Mixer", "Output Switch", "Left Output Mixer" },
{ "LINEOUT1N Driver", NULL, "LINEOUT1 Mixer" },
{ "LINEOUT1P Driver", NULL, "LINEOUT1 Mixer" },
};
static const struct snd_soc_dapm_route lineout1_se_routes[] = {
- { "LINEOUT1N Mixer", "Left Output Switch", "Left Output PGA" },
- { "LINEOUT1N Mixer", "Right Output Switch", "Right Output PGA" },
+ { "LINEOUT1N Mixer", "Left Output Switch", "Left Output Mixer" },
+ { "LINEOUT1N Mixer", "Right Output Switch", "Left Output Mixer" },
- { "LINEOUT1P Mixer", "Left Output Switch", "Left Output PGA" },
+ { "LINEOUT1P Mixer", "Left Output Switch", "Left Output Mixer" },
{ "LINEOUT1N Driver", NULL, "LINEOUT1N Mixer" },
{ "LINEOUT1P Driver", NULL, "LINEOUT1P Mixer" },
static const struct snd_soc_dapm_route lineout2_diff_routes[] = {
{ "LINEOUT2 Mixer", "IN2L Switch", "IN2L PGA" },
{ "LINEOUT2 Mixer", "IN2R Switch", "IN2R PGA" },
- { "LINEOUT2 Mixer", "Output Switch", "Right Output PGA" },
+ { "LINEOUT2 Mixer", "Output Switch", "Right Output Mixer" },
{ "LINEOUT2N Driver", NULL, "LINEOUT2 Mixer" },
{ "LINEOUT2P Driver", NULL, "LINEOUT2 Mixer" },
};
static const struct snd_soc_dapm_route lineout2_se_routes[] = {
- { "LINEOUT2N Mixer", "Left Output Switch", "Left Output PGA" },
- { "LINEOUT2N Mixer", "Right Output Switch", "Right Output PGA" },
+ { "LINEOUT2N Mixer", "Left Output Switch", "Left Output Mixer" },
+ { "LINEOUT2N Mixer", "Right Output Switch", "Left Output Mixer" },
- { "LINEOUT2P Mixer", "Right Output Switch", "Right Output PGA" },
+ { "LINEOUT2P Mixer", "Right Output Switch", "Right Output Mixer" },
{ "LINEOUT2N Driver", NULL, "LINEOUT2N Mixer" },
{ "LINEOUT2P Driver", NULL, "LINEOUT2P Mixer" },
snd_soc_update_bits(codec, WM8993_RIGHT_LINE_INPUT_3_4_VOLUME,
WM8993_IN2_VU, WM8993_IN2_VU);
- snd_soc_update_bits(codec, WM8993_SPEAKER_VOLUME_LEFT,
- WM8993_SPKOUT_VU, WM8993_SPKOUT_VU);
snd_soc_update_bits(codec, WM8993_SPEAKER_VOLUME_RIGHT,
WM8993_SPKOUT_VU, WM8993_SPKOUT_VU);
snd_soc_update_bits(codec, WM8993_LEFT_OUTPUT_VOLUME,
- WM8993_HPOUT1_VU | WM8993_HPOUT1L_ZC,
- WM8993_HPOUT1_VU | WM8993_HPOUT1L_ZC);
+ WM8993_HPOUT1L_ZC, WM8993_HPOUT1L_ZC);
snd_soc_update_bits(codec, WM8993_RIGHT_OUTPUT_VOLUME,
WM8993_HPOUT1_VU | WM8993_HPOUT1R_ZC,
WM8993_HPOUT1_VU | WM8993_HPOUT1R_ZC);
snd_soc_update_bits(codec, WM8993_LEFT_OPGA_VOLUME,
- WM8993_MIXOUTL_ZC | WM8993_MIXOUT_VU,
- WM8993_MIXOUTL_ZC | WM8993_MIXOUT_VU);
+ WM8993_MIXOUTL_ZC, WM8993_MIXOUTL_ZC);
snd_soc_update_bits(codec, WM8993_RIGHT_OPGA_VOLUME,
WM8993_MIXOUTR_ZC | WM8993_MIXOUT_VU,
WM8993_MIXOUTR_ZC | WM8993_MIXOUT_VU);