extern unsigned long idle_halt;
extern unsigned long idle_nomwait;
+/*
+ * on systems with caches, caches must be flushed as the absolute
+ * last instruction before going into a suspended halt. Otherwise,
+ * dirty data can linger in the cache and become stale on resume,
+ * leading to strange errors.
+ *
+ * perform a variety of operations to guarantee that the compiler
+ * will not reorder instructions. wbinvd itself is serializing
+ * so the processor will not reorder.
+ *
+ * Systems without cache can just go into halt.
+ */
+static inline void wbinvd_halt(void)
+{
+ mb();
+ /* check for clflush to determine if wbinvd is legal */
+ if (cpu_has_clflush)
+ asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory");
+ else
+ while (1)
+ halt();
+}
+
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);
setup_apic_nmi_watchdog(NULL);
apic_pm_activate();
-
- /*
- * Now that local APIC setup is completed for BP, configure the fault
- * handling for interrupt remapping.
- */
- if (!smp_processor_id() && intr_remapping_enabled)
- enable_drhd_fault_handling();
-
}
#ifdef CONFIG_X86_X2APIC
msg.data |= MSI_DATA_VECTOR(cfg->vector);
msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
msg.address_lo |= MSI_ADDR_DEST_ID(dest);
- msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest);
dmar_msi_write(irq, &msg);
/* need to update phys_pkg_id */
apic->phys_pkg_id = apicid_phys_pkg_id;
}
+
+ /*
+ * Now that apic routing model is selected, configure the
+ * fault handling for intr remapping.
+ */
+ if (intr_remapping_enabled)
+ enable_drhd_fault_handling();
}
/* Same for both flat and physical. */
/* use socket ID also for last level cache */
per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
/* fixup topology information on multi-node processors */
- amd_fixup_dcm(c);
+ if ((c->x86 == 0x10) && (c->x86_model == 9))
+ amd_fixup_dcm(c);
#endif
}
local_irq_disable();
}
-#define MWAIT_SUBSTATE_MASK 0xf
-#define MWAIT_SUBSTATE_SIZE 4
-
-#define CPUID_MWAIT_LEAF 5
-#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1
-
-/*
- * We need to flush the caches before going to sleep, lest we have
- * dirty data in our caches when we come back up.
- */
-static inline void mwait_play_dead(void)
-{
- unsigned int eax, ebx, ecx, edx;
- unsigned int highest_cstate = 0;
- unsigned int highest_subcstate = 0;
- int i;
- void *mwait_ptr;
-
- if (!cpu_has(&current_cpu_data, X86_FEATURE_MWAIT))
- return;
- if (!cpu_has(&current_cpu_data, X86_FEATURE_CLFLSH))
- return;
- if (current_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
- return;
-
- eax = CPUID_MWAIT_LEAF;
- ecx = 0;
- native_cpuid(&eax, &ebx, &ecx, &edx);
-
- /*
- * eax will be 0 if EDX enumeration is not valid.
- * Initialized below to cstate, sub_cstate value when EDX is valid.
- */
- if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
- eax = 0;
- } else {
- edx >>= MWAIT_SUBSTATE_SIZE;
- for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
- if (edx & MWAIT_SUBSTATE_MASK) {
- highest_cstate = i;
- highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
- }
- }
- eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
- (highest_subcstate - 1);
- }
-
- /*
- * This should be a memory location in a cache line which is
- * unlikely to be touched by other processors. The actual
- * content is immaterial as it is not actually modified in any way.
- */
- mwait_ptr = &current_thread_info()->flags;
-
- wbinvd();
-
- while (1) {
- /*
- * The CLFLUSH is a workaround for erratum AAI65 for
- * the Xeon 7400 series. It's not clear it is actually
- * needed, but it should be harmless in either case.
- * The WBINVD is insufficient due to the spurious-wakeup
- * case where we return around the loop.
- */
- clflush(mwait_ptr);
- __monitor(mwait_ptr, 0, 0);
- mb();
- __mwait(eax, 0);
- }
-}
-
-static inline void hlt_play_dead(void)
-{
- if (current_cpu_data.x86 >= 4)
- wbinvd();
-
- while (1) {
- native_halt();
- }
-}
-
void native_play_dead(void)
{
play_dead_common();
tboot_shutdown(TB_SHUTDOWN_WFS);
-
- mwait_play_dead(); /* Only returns on failure */
- hlt_play_dead();
+ wbinvd_halt();
}
#else /* ... !CONFIG_HOTPLUG_CPU */
export CPPFLAGS_vdso.lds += -P -C
-VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
+VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -Wl,-soname=linux-vdso.so.1 \
-Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
$(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
vdso32-images = $(vdso32.so-y:%=vdso32-%.so)
CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
-VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-soname=linux-gate.so.1
+VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -Wl,-soname=linux-gate.so.1
# This makes sure the $(obj) subdirectory exists even though vdso32/
# is not a kbuild sub-make subdirectory.
return 0;
fbio = bio;
- cluster = blk_queue_cluster(q);
+ cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
seg_size = 0;
phys_size = nr_phys_segs = 0;
for_each_bio(bio) {
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
struct bio *nxt)
{
- if (!blk_queue_cluster(q))
+ if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
return 0;
if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
int nsegs, cluster;
nsegs = 0;
- cluster = blk_queue_cluster(q);
+ cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
/*
* for each bio in rq
lim->alignment_offset = 0;
lim->io_opt = 0;
lim->misaligned = 0;
- lim->cluster = 1;
+ lim->no_cluster = 0;
}
EXPORT_SYMBOL(blk_set_default_limits);
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
blk_stack_limits(&t->limits, &b->limits, 0);
+
+ if (!t->queue_lock)
+ WARN_ON_ONCE(1);
+ else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
+ unsigned long flags;
+ spin_lock_irqsave(t->queue_lock, flags);
+ queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
+ spin_unlock_irqrestore(t->queue_lock, flags);
+ }
}
EXPORT_SYMBOL(blk_queue_stack_limits);
t->io_min = max(t->io_min, b->io_min);
t->io_opt = lcm(t->io_opt, b->io_opt);
- t->cluster &= b->cluster;
+ t->no_cluster |= b->no_cluster;
/* Physical block size a multiple of the logical block size? */
if (t->physical_block_size & (t->logical_block_size - 1)) {
printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
top, bottom);
}
+
+ if (!t->queue_lock)
+ WARN_ON_ONCE(1);
+ else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(t->queue_lock, flags);
+ if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
+ queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
+ spin_unlock_irqrestore(t->queue_lock, flags);
+ }
}
EXPORT_SYMBOL(disk_stack_limits);
* we must enter this object into the namespace. The created
* object is temporary and will be deleted upon completion of
* the execution of this method.
- *
- * Note 10/2010: Except for the Scope() op. This opcode does
- * not actually create a new object, it refers to an existing
- * object. However, for Scope(), we want to indeed open a
- * new scope.
*/
- if (op->common.aml_opcode != AML_SCOPE_OP) {
- status =
- acpi_ds_load2_begin_op(walk_state, NULL);
- } else {
- status =
- acpi_ds_scope_stack_push(op->named.node,
- op->named.node->
- type, walk_state);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
+ status = acpi_ds_load2_begin_op(walk_state, NULL);
}
+
break;
case AML_CLASS_EXECUTE:
ec_flag_msi, "MSI hardware", {
DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star")}, NULL},
{
- ec_flag_msi, "MSI hardware", {
- DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR")}, NULL},
- {
ec_validate_ecdt, "ASUS hardware", {
DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
{},
spin_lock_irqsave(&hp->lock, flags);
/* Check and then increment for fast path open. */
if (hp->count++ > 0) {
- tty_kref_get(tty);
spin_unlock_irqrestore(&hp->lock, flags);
hvc_kick();
return 0;
tty->driver_data = hp;
- hp->tty = tty_kref_get(tty);
+ hp->tty = tty;
spin_unlock_irqrestore(&hp->lock, flags);
spin_lock_irqsave(&hp->lock, flags);
hp->tty = NULL;
spin_unlock_irqrestore(&hp->lock, flags);
- tty_kref_put(tty);
tty->driver_data = NULL;
kref_put(&hp->kref, destroy_hvc_struct);
printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc);
return;
hp = tty->driver_data;
-
spin_lock_irqsave(&hp->lock, flags);
if (--hp->count == 0) {
spin_unlock_irqrestore(&hp->lock, flags);
}
- tty_kref_put(tty);
kref_put(&hp->kref, destroy_hvc_struct);
}
spin_unlock_irqrestore(&hp->lock, flags);
if (hp->ops->notifier_hangup)
- hp->ops->notifier_hangup(hp, hp->data);
+ hp->ops->notifier_hangup(hp, hp->data);
while(temp_open_count) {
--temp_open_count;
- tty_kref_put(tty);
kref_put(&hp->kref, destroy_hvc_struct);
}
}
}
/* No tty attached, just skip */
- tty = tty_kref_get(hp->tty);
+ tty = hp->tty;
if (tty == NULL)
goto bail;
tty_flip_buffer_push(tty);
}
- if (tty)
- tty_kref_put(tty);
return poll_mask;
}
struct tty_struct *tty;
spin_lock_irqsave(&hp->lock, flags);
- tty = tty_kref_get(hp->tty);
+ tty = hp->tty;
if (hp->index < MAX_NR_HVC_CONSOLES)
vtermnos[hp->index] = -1;
/*
* We 'put' the instance that was grabbed when the kref instance
* was initialized using kref_init(). Let the last holder of this
- * kref cause it to be removed, which will probably be the tty_vhangup
+ * kref cause it to be removed, which will probably be the tty_hangup
* below.
*/
kref_put(&hp->kref, destroy_hvc_struct);
/*
- * This function call will auto chain call hvc_hangup.
+ * This function call will auto chain call hvc_hangup. The tty should
+ * always be valid at this time unless a simultaneous tty close already
+ * cleaned up the hvc_struct.
*/
- if (tty) {
- tty_vhangup(tty);
- tty_kref_put(tty);
- }
+ if (tty)
+ tty_hangup(tty);
return 0;
}
ret = ld->ops->open(tty);
if (ret)
clear_bit(TTY_LDISC_OPEN, &tty->flags);
- return ret;
}
return 0;
}
static void mv_xor_tasklet(unsigned long data)
{
struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
- mv_xor_slot_cleanup(chan);
+ __mv_xor_slot_cleanup(chan);
}
static struct mv_xor_desc_slot *
debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n",
hole_off, hole_valid, intlv_sel);
- if (intlv_en &&
+ if (intlv_en ||
(intlv_sel != ((sys_addr >> 12) & intlv_en)))
return -EINVAL;
{ DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
{ DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
{ DRM_MODE_CONNECTOR_Component, "Component", 0 },
- { DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 },
- { DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 },
- { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 },
- { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
+ { DRM_MODE_CONNECTOR_9PinDIN, "9-pin DIN", 0 },
+ { DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 },
+ { DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
+ { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
};
unsigned int minor = iminor(inode);
struct hidraw *dev;
struct hidraw_list *list = file->private_data;
- int ret;
- mutex_lock(&minors_lock);
if (!hidraw_table[minor]) {
printk(KERN_EMERG "hidraw device with minor %d doesn't exist\n",
minor);
- ret = -ENODEV;
- goto unlock;
+ return -ENODEV;
}
list_del(&list->node);
kfree(list->hidraw);
}
}
+
kfree(list);
- ret = 0;
-unlock:
- mutex_unlock(&minors_lock);
- return ret;
+ return 0;
}
static long hidraw_ioctl(struct file *file, unsigned int cmd,
int nr = sensor_attr->index;
struct i2c_client *client = to_i2c_client(dev);
struct adm1026_data *data = i2c_get_clientdata(client);
- int val, orig_div, new_div;
+ int val, orig_div, new_div, shift;
val = simple_strtol(buf, NULL, 10);
new_div = DIV_TO_REG(val);
-
+ if (new_div == 0) {
+ return -EINVAL;
+ }
mutex_lock(&data->update_lock);
orig_div = data->fan_div[nr];
data->fan_div[nr] = DIV_FROM_REG(new_div);
if (nr < 4) { /* 0 <= nr < 4 */
+ shift = 2 * nr;
adm1026_write_value(client, ADM1026_REG_FAN_DIV_0_3,
- (DIV_TO_REG(data->fan_div[0]) << 0) |
- (DIV_TO_REG(data->fan_div[1]) << 2) |
- (DIV_TO_REG(data->fan_div[2]) << 4) |
- (DIV_TO_REG(data->fan_div[3]) << 6));
+ ((DIV_TO_REG(orig_div) & (~(0x03 << shift))) |
+ (new_div << shift)));
} else { /* 3 < nr < 8 */
+ shift = 2 * (nr - 4);
adm1026_write_value(client, ADM1026_REG_FAN_DIV_4_7,
- (DIV_TO_REG(data->fan_div[4]) << 0) |
- (DIV_TO_REG(data->fan_div[5]) << 2) |
- (DIV_TO_REG(data->fan_div[6]) << 4) |
- (DIV_TO_REG(data->fan_div[7]) << 6));
+ ((DIV_TO_REG(orig_div) & (~(0x03 << (2 * shift)))) |
+ (new_div << shift)));
}
if (data->fan_div[nr] != orig_div) {
return ret ? ret : in_len;
}
-static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
-{
- struct ib_uverbs_wc tmp;
-
- tmp.wr_id = wc->wr_id;
- tmp.status = wc->status;
- tmp.opcode = wc->opcode;
- tmp.vendor_err = wc->vendor_err;
- tmp.byte_len = wc->byte_len;
- tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
- tmp.qp_num = wc->qp->qp_num;
- tmp.src_qp = wc->src_qp;
- tmp.wc_flags = wc->wc_flags;
- tmp.pkey_index = wc->pkey_index;
- tmp.slid = wc->slid;
- tmp.sl = wc->sl;
- tmp.dlid_path_bits = wc->dlid_path_bits;
- tmp.port_num = wc->port_num;
- tmp.reserved = 0;
-
- if (copy_to_user(dest, &tmp, sizeof tmp))
- return -EFAULT;
-
- return 0;
-}
-
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
const char __user *buf, int in_len,
int out_len)
{
struct ib_uverbs_poll_cq cmd;
- struct ib_uverbs_poll_cq_resp resp;
- u8 __user *header_ptr;
- u8 __user *data_ptr;
+ struct ib_uverbs_poll_cq_resp *resp;
struct ib_cq *cq;
- struct ib_wc wc;
- int ret;
+ struct ib_wc *wc;
+ int ret = 0;
+ int i;
+ int rsize;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
- if (!cq)
- return -EINVAL;
+ wc = kmalloc(cmd.ne * sizeof *wc, GFP_KERNEL);
+ if (!wc)
+ return -ENOMEM;
- /* we copy a struct ib_uverbs_poll_cq_resp to user space */
- header_ptr = (void __user *)(unsigned long) cmd.response;
- data_ptr = header_ptr + sizeof resp;
+ rsize = sizeof *resp + cmd.ne * sizeof(struct ib_uverbs_wc);
+ resp = kmalloc(rsize, GFP_KERNEL);
+ if (!resp) {
+ ret = -ENOMEM;
+ goto out_wc;
+ }
- memset(&resp, 0, sizeof resp);
- while (resp.count < cmd.ne) {
- ret = ib_poll_cq(cq, 1, &wc);
- if (ret < 0)
- goto out_put;
- if (!ret)
- break;
+ cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
+ if (!cq) {
+ ret = -EINVAL;
+ goto out;
+ }
- ret = copy_wc_to_user(data_ptr, &wc);
- if (ret)
- goto out_put;
+ resp->count = ib_poll_cq(cq, cmd.ne, wc);
+
+ put_cq_read(cq);
- data_ptr += sizeof(struct ib_uverbs_wc);
- ++resp.count;
+ for (i = 0; i < resp->count; i++) {
+ resp->wc[i].wr_id = wc[i].wr_id;
+ resp->wc[i].status = wc[i].status;
+ resp->wc[i].opcode = wc[i].opcode;
+ resp->wc[i].vendor_err = wc[i].vendor_err;
+ resp->wc[i].byte_len = wc[i].byte_len;
+ resp->wc[i].ex.imm_data = (__u32 __force) wc[i].ex.imm_data;
+ resp->wc[i].qp_num = wc[i].qp->qp_num;
+ resp->wc[i].src_qp = wc[i].src_qp;
+ resp->wc[i].wc_flags = wc[i].wc_flags;
+ resp->wc[i].pkey_index = wc[i].pkey_index;
+ resp->wc[i].slid = wc[i].slid;
+ resp->wc[i].sl = wc[i].sl;
+ resp->wc[i].dlid_path_bits = wc[i].dlid_path_bits;
+ resp->wc[i].port_num = wc[i].port_num;
}
- if (copy_to_user(header_ptr, &resp, sizeof resp)) {
+ if (copy_to_user((void __user *) (unsigned long) cmd.response, resp, rsize))
ret = -EFAULT;
- goto out_put;
- }
- ret = in_len;
+out:
+ kfree(resp);
-out_put:
- put_cq_read(cq);
- return ret;
+out_wc:
+ kfree(wc);
+ return ret ? ret : in_len;
}
ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
*/
q->limits = *limits;
+ if (limits->no_cluster)
+ queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
+ else
+ queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
+
dm_table_set_integrity(t);
/*
goto abort;
mddev->queue->queuedata = mddev;
+ /* Can be unlocked because the queue is new: no concurrency */
+ queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
+
blk_queue_make_request(mddev->queue, md_make_request);
disk = alloc_disk(1 << shift);
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
- /* set saved_raid_disk if appropriate */
+ /* set save_raid_disk if appropriate */
if (!mddev->persistent) {
if (info->state & (1<<MD_DISK_SYNC) &&
info->raid_disk < mddev->raid_disks)
} else
super_types[mddev->major_version].
validate_super(mddev, rdev);
- if (test_bit(In_sync, &rdev->flags))
- rdev->saved_raid_disk = rdev->raid_disk;
- else
- rdev->saved_raid_disk = -1;
+ rdev->saved_raid_disk = rdev->raid_disk;
clear_bit(In_sync, &rdev->flags); /* just to be sure */
if (info->state & (1<<MD_DISK_WRITEMOSTLY))
bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));
skb_record_rx_queue(skb, ring->queue_index);
- if (vlan_extracted && adapter->vlgrp)
+ if (vlan_extracted)
vlan_gro_receive(&ring->napi, adapter->vlgrp,
le16_to_cpu(rx_desc->wb.upper.vlan),
skb);
(unsigned long long)drhd->reg_base_addr, ret);
return -1;
}
-
- /*
- * Clear any previous faults.
- */
- dmar_fault(iommu->irq, iommu);
}
return 0;
extern struct pci_fixup __start_pci_fixups_suspend[];
extern struct pci_fixup __end_pci_fixups_suspend[];
-#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
-#define VTUNCERRMSK_REG 0x1ac
-#define VTD_MSK_SPEC_ERRORS (1 << 31)
-/*
- * This is a quirk for masking vt-d spec defined errors to platform error
- * handling logic. With out this, platforms using Intel 7500, 5500 chipsets
- * (and the derivative chipsets like X58 etc) seem to generate NMI/SMI (based
- * on the RAS config settings of the platform) when a vt-d fault happens.
- * The resulting SMI caused the system to hang.
- *
- * VT-d spec related errors are already handled by the VT-d OS code, so no
- * need to report the same error through other channels.
- */
-static void vtd_mask_spec_errors(struct pci_dev *dev)
-{
- u32 word;
-
- pci_read_config_dword(dev, VTUNCERRMSK_REG, &word);
- pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS);
-}
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
-#endif
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
{
for (i = 0; hal_mods[i]; i++)
hal_mods[i]->meminfo(cfg, &km_len, &dm_len);
- dm_len += bfa_port_meminfo();
meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len;
meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
}
-static void
-bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
-{
- struct bfa_port_s *port = &bfa->modules.port;
- uint32_t dm_len;
- uint8_t *dm_kva;
- uint64_t dm_pa;
-
- dm_len = bfa_port_meminfo();
- dm_kva = bfa_meminfo_dma_virt(mi);
- dm_pa = bfa_meminfo_dma_phys(mi);
-
- memset(port, 0, sizeof(struct bfa_port_s));
- bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod, bfa->logm);
- bfa_port_mem_claim(port, dm_kva, dm_pa);
-
- bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
- bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
-}
-
/**
* Use this function to do attach the driver instance with the BFA
* library. This function will not trigger any HW initialization
for (i = 0; hal_mods[i]; i++)
hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev);
- bfa_com_port_attach(bfa, meminfo);
}
/**
blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
+ /* New queue, no concurrency on queue_flags */
if (!shost->use_clustering)
- q->limits.cluster = 0;
+ queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
/*
* set a reasonable default alignment on word boundaries: the
/*
* uss720.c -- USS720 USB Parport Cable.
*
- * Copyright (C) 1999, 2005, 2010
+ * Copyright (C) 1999, 2005
* Thomas Sailer (t.sailer@alumni.ethz.ch)
*
* This program is free software; you can redistribute it and/or modify
{ USB_DEVICE(0x0557, 0x2001) },
{ USB_DEVICE(0x0729, 0x1284) },
{ USB_DEVICE(0x1293, 0x0002) },
- { USB_DEVICE(0x1293, 0x0002) },
- { USB_DEVICE(0x050d, 0x0002) },
{ } /* Terminating entry */
};
{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) },
- { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
{ USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ }, /* Optional parameter entry */
#define MJSG_XM_RADIO_PID 0x937A
#define MJSG_HD_RADIO_PID 0x937C
-/*
- * D.O.Tec products (http://www.directout.eu)
- */
-#define FTDI_DOTEC_PID 0x9868
-
/*
* Xverve Signalyzer tools (http://www.signalyzer.com/)
*/
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_MAX_SECTORS_64),
-/* Reported by Vitaly Kuznetsov <vitty@altlinux.ru> */
-UNUSUAL_DEV( 0x04e8, 0x5122, 0x0000, 0x9999,
- "Samsung",
- "YP-CP3",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_MAX_SECTORS_64 | US_FL_BULK_IGNORE_TAG),
-
/* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
* Device uses standards-violating 32-byte Bulk Command Block Wrappers and
* reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011.
vma->vm_start = vma->vm_end - PAGE_SIZE;
vma->vm_flags = VM_STACK_FLAGS;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-
- err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
- if (err)
- goto err;
-
err = insert_vm_struct(mm, vma);
if (err)
goto err;
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
-#include <linux/compat.h>
static const struct file_operations fuse_direct_io_file_operations;
return 0;
}
-/* Make sure iov_length() won't overflow */
-static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
-{
- size_t n;
- u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;
-
- for (n = 0; n < count; n++) {
- if (iov->iov_len > (size_t) max)
- return -ENOMEM;
- max -= iov->iov_len;
- }
- return 0;
-}
-
-/*
- * CUSE servers compiled on 32bit broke on 64bit kernels because the
- * ABI was defined to be 'struct iovec' which is different on 32bit
- * and 64bit. Fortunately we can determine which structure the server
- * used from the size of the reply.
- */
-static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src,
- size_t transferred, unsigned count,
- bool is_compat)
-{
-#ifdef CONFIG_COMPAT
- if (count * sizeof(struct compat_iovec) == transferred) {
- struct compat_iovec *ciov = src;
- unsigned i;
-
- /*
- * With this interface a 32bit server cannot support
- * non-compat (i.e. ones coming from 64bit apps) ioctl
- * requests
- */
- if (!is_compat)
- return -EINVAL;
-
- for (i = 0; i < count; i++) {
- dst[i].iov_base = compat_ptr(ciov[i].iov_base);
- dst[i].iov_len = ciov[i].iov_len;
- }
- return 0;
- }
-#endif
-
- if (count * sizeof(struct iovec) != transferred)
- return -EIO;
-
- memcpy(dst, src, transferred);
- return 0;
-}
-
/*
* For ioctls, there is no generic way to determine how much memory
* needs to be read and/or written. Furthermore, ioctls are allowed
in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
goto out;
+ err = -EIO;
+ if ((in_iovs + out_iovs) * sizeof(struct iovec) != transferred)
+ goto out;
+
+ /* okay, copy in iovs and retry */
vaddr = kmap_atomic(pages[0], KM_USER0);
- err = fuse_copy_ioctl_iovec(page_address(iov_page), vaddr,
- transferred, in_iovs + out_iovs,
- (flags & FUSE_IOCTL_COMPAT) != 0);
+ memcpy(page_address(iov_page), vaddr, transferred);
kunmap_atomic(vaddr, KM_USER0);
- if (err)
- goto out;
in_iov = page_address(iov_page);
out_iov = in_iov + in_iovs;
- err = fuse_verify_ioctl_iov(in_iov, in_iovs);
- if (err)
- goto out;
-
- err = fuse_verify_ioctl_iov(out_iov, out_iovs);
- if (err)
- goto out;
-
goto retry;
}
{
struct inode *inode = filp->f_mapping->host;
int status = 0;
- unsigned int saved_type = fl->fl_type;
/* Try local locking first */
posix_test_lock(filp, fl);
/* found a conflict */
goto out;
}
- fl->fl_type = saved_type;
if (nfs_have_delegation(inode, FMODE_READ))
goto out_noconflict;
static struct rpc_version mnt_version1 = {
.number = 1,
- .nrprocs = ARRAY_SIZE(mnt_procedures),
+ .nrprocs = 2,
.procs = mnt_procedures,
};
static struct rpc_version mnt_version3 = {
.number = 3,
- .nrprocs = ARRAY_SIZE(mnt3_procedures),
+ .nrprocs = 2,
.procs = mnt3_procedures,
};
err = vfs_getattr(fhp->fh_export->ex_path.mnt, fhp->fh_dentry,
&fhp->fh_post_attr);
fhp->fh_post_change = fhp->fh_dentry->d_inode->i_version;
- if (err) {
+ if (err)
fhp->fh_post_saved = 0;
- /* Grab the ctime anyway - set_change_info might use it */
- fhp->fh_post_attr.ctime = fhp->fh_dentry->d_inode->i_ctime;
- } else
+ else
fhp->fh_post_saved = 1;
}
unsigned short max_phys_segments;
unsigned char misaligned;
- unsigned char cluster;
+ unsigned char no_cluster;
};
struct request_queue
#endif
};
+#define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
#define QUEUE_FLAG_DISCARD 17 /* supports DISCARD */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
+ (1 << QUEUE_FLAG_CLUSTER) | \
(1 << QUEUE_FLAG_STACKABLE) | \
(1 << QUEUE_FLAG_SAME_COMP))
#define rq_data_dir(rq) ((rq)->cmd_flags & 1)
-static inline unsigned int blk_queue_cluster(struct request_queue *q)
-{
- return q->limits.cluster;
-}
-
/*
* We regard a request as sync, if either a read or a sync write
*/
static inline void
set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp)
{
- BUG_ON(!fhp->fh_pre_saved);
- cinfo->atomic = fhp->fh_post_saved;
+ BUG_ON(!fhp->fh_pre_saved || !fhp->fh_post_saved);
+ cinfo->atomic = 1;
cinfo->change_supported = IS_I_VERSION(fhp->fh_dentry->d_inode);
-
- cinfo->before_change = fhp->fh_pre_change;
- cinfo->after_change = fhp->fh_post_change;
- cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec;
- cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec;
- cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec;
- cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec;
-
+ if (cinfo->change_supported) {
+ cinfo->before_change = fhp->fh_pre_change;
+ cinfo->after_change = fhp->fh_post_change;
+ } else {
+ cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec;
+ cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec;
+ cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec;
+ cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec;
+ }
}
int nfs4svc_encode_voidres(struct svc_rqst *, __be32 *, void *);
/* 2nd level prototypes */
void sctp_generate_t3_rtx_event(unsigned long peer);
void sctp_generate_heartbeat_event(unsigned long peer);
-void sctp_generate_proto_unreach_event(unsigned long peer);
void sctp_ootb_pkt_free(struct sctp_packet *);
/* Heartbeat timer is per destination. */
struct timer_list hb_timer;
- /* Timer to handle ICMP proto unreachable envets */
- struct timer_list proto_unreach_timer;
-
/* Since we're using per-destination retransmission timers
* (see above), we're also using per-destination "transmitted"
* queues. This probably ought to be a private struct
if (atomic_dec_and_test(&sig->count))
posix_cpu_timers_exit_group(tsk);
else {
- /*
- * This can only happen if the caller is de_thread().
- * FIXME: this is the temporary hack, we should teach
- * posix-cpu-timers to handle this case correctly.
- */
- if (unlikely(has_group_leader_pid(tsk)))
- posix_cpu_timers_exit_group(tsk);
-
/*
* If there is any task waiting for the group exit
* then notify it:
free_all_swap_pages(data->swap);
if (data->frozen)
thaw_processes();
- pm_notifier_call_chain(data->mode == O_RDONLY ?
+ pm_notifier_call_chain(data->mode == O_WRONLY ?
PM_POST_HIBERNATION : PM_POST_RESTORE);
atomic_inc(&snapshot_device_available);
int printk_needs_cpu(int cpu)
{
- if (unlikely(cpu_is_offline(cpu)))
- printk_tick();
return per_cpu(printk_pending, cpu);
}
struct tvec_base *base = __get_cpu_var(tvec_bases);
unsigned long expires;
- /*
- * Pretend that there is no timer pending if the cpu is offline.
- * Possible pending timers will be migrated later to an active cpu.
- */
- if (cpu_is_offline(smp_processor_id()))
- return now + NEXT_TIMER_MAX_DELTA;
spin_lock(&base->lock);
if (time_before_eq(base->next_timer, base->timer_jiffies))
base->next_timer = __next_timer_interrupt(base);
return count;
}
-static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
-{
- if (file->f_mode & FMODE_READ)
- return seq_lseek(file, offset, origin);
- else
- return 0;
-}
-
static const struct file_operations tracing_fops = {
.open = tracing_open,
.read = seq_read,
.write = tracing_write_stub,
- .llseek = tracing_seek,
+ .llseek = seq_lseek,
.release = tracing_release,
};
unsigned long addr, unsigned long len,
unsigned long vm_flags, struct page **pages)
{
- int ret;
struct vm_area_struct *vma;
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
vma->vm_ops = &special_mapping_vmops;
vma->vm_private_data = pages;
- ret = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
- if (ret)
- goto out;
-
- ret = insert_vm_struct(mm, vma);
- if (ret)
- goto out;
+ if (unlikely(insert_vm_struct(mm, vma))) {
+ kmem_cache_free(vm_area_cachep, vma);
+ return -ENOMEM;
+ }
mm->total_vm += len >> PAGE_SHIFT;
perf_event_mmap(vma);
return 0;
-
-out:
- kmem_cache_free(vm_area_cachep, vma);
- return ret;
}
static DEFINE_MUTEX(mm_all_locks_mutex);
skb->next = nskb->next;
nskb->next = NULL;
-
- /*
- * If device doesnt need nskb->dst, release it right now while
- * its hot in this cpu cache
- */
- if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
- skb_dst_drop(nskb);
-
rc = ops->ndo_start_xmit(nskb, dev);
if (unlikely(rc != NETDEV_TX_OK)) {
nskb->next = skb->next;
{
SCTP_DEBUG_PRINTK("%s\n", __func__);
- if (sock_owned_by_user(sk)) {
- if (timer_pending(&t->proto_unreach_timer))
- return;
- else {
- if (!mod_timer(&t->proto_unreach_timer,
- jiffies + (HZ/20)))
- sctp_association_hold(asoc);
- }
+ sctp_do_sm(SCTP_EVENT_T_OTHER,
+ SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
+ asoc->state, asoc->ep, asoc, t,
+ GFP_ATOMIC);
- } else {
- if (timer_pending(&t->proto_unreach_timer) &&
- del_timer(&t->proto_unreach_timer))
- sctp_association_put(asoc);
-
- sctp_do_sm(SCTP_EVENT_T_OTHER,
- SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
- asoc->state, asoc->ep, asoc, t,
- GFP_ATOMIC);
- }
}
/* Common lookup code for icmp/icmpv6 error handler. */
sctp_transport_put(transport);
}
-/* Handle the timeout of the ICMP protocol unreachable timer. Trigger
- * the correct state machine transition that will close the association.
- */
-void sctp_generate_proto_unreach_event(unsigned long data)
-{
- struct sctp_transport *transport = (struct sctp_transport *) data;
- struct sctp_association *asoc = transport->asoc;
-
- sctp_bh_lock_sock(asoc->base.sk);
- if (sock_owned_by_user(asoc->base.sk)) {
- SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__);
-
- /* Try again later. */
- if (!mod_timer(&transport->proto_unreach_timer,
- jiffies + (HZ/20)))
- sctp_association_hold(asoc);
- goto out_unlock;
- }
-
- /* Is this structure just waiting around for us to actually
- * get destroyed?
- */
- if (asoc->base.dead)
- goto out_unlock;
-
- sctp_do_sm(SCTP_EVENT_T_OTHER,
- SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
- asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
-
-out_unlock:
- sctp_bh_unlock_sock(asoc->base.sk);
- sctp_association_put(asoc);
-}
-
-
/* Inject a SACK Timeout event into the state machine. */
static void sctp_generate_sack_event(unsigned long data)
{
(unsigned long)peer);
setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event,
(unsigned long)peer);
- setup_timer(&peer->proto_unreach_timer,
- sctp_generate_proto_unreach_event, (unsigned long)peer);
/* Initialize the 64-bit random nonce sent with heartbeat. */
get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));
spin_lock(&svc_xprt_class_lock);
list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
struct svc_xprt *newxprt;
- unsigned short newport;
if (strcmp(xprt_name, xcl->xcl_name))
continue;
spin_lock_bh(&serv->sv_lock);
list_add(&newxprt->xpt_list, &serv->sv_permsocks);
spin_unlock_bh(&serv->sv_lock);
- newport = svc_xprt_local_port(newxprt);
clear_bit(XPT_BUSY, &newxprt->xpt_flags);
- return newport;
+ return svc_xprt_local_port(newxprt);
}
err:
spin_unlock(&svc_xprt_class_lock);
{
BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags));
xprt->xpt_pool = NULL;
- /* As soon as we clear busy, the xprt could be closed and
- * 'put', so we need a reference to call svc_xprt_enqueue with:
- */
- svc_xprt_get(xprt);
clear_bit(XPT_BUSY, &xprt->xpt_flags);
svc_xprt_enqueue(xprt);
- svc_xprt_put(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_received);
result = security_filter_rule_init(entry->lsm[lsm_rule].type,
Audit_equal, args,
&entry->lsm[lsm_rule].rule);
- if (!entry->lsm[lsm_rule].rule)
- return -EINVAL;
return result;
}
int i, n;
for (i = 0; i < num_mixer_volumes; i++) {
- if (strncmp(name, mixer_vols[i].name, 32) == 0) {
+ if (strcmp(name, mixer_vols[i].name) == 0) {
if (present)
mixer_vols[i].num = i;
return mixer_vols[i].levels;
}
n = num_mixer_volumes++;
- strncpy(mixer_vols[n].name, name, 32);
+ strcpy(mixer_vols[n].name, name);
if (present)
mixer_vols[n].num = n;
SND_PCI_QUIRK(0x1025, 0x009f, "Acer Aspire 5110", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB),
- SND_PCI_QUIRK(0x1028, 0x01f6, "Dell Latitude 131L", POS_FIX_LPIB),
- SND_PCI_QUIRK(0x1028, 0x0470, "Dell Inspiron 1120", POS_FIX_LPIB),
SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1028, 0x01f6, "Dell Latitude 131L", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1734, 0x10b0, "Fujitsu", ALC880_FUJITSU),
SND_PCI_QUIRK(0x1854, 0x0018, "LG LW20", ALC880_LG_LW),
SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_LG),
- SND_PCI_QUIRK(0x1854, 0x005f, "LG P1 Express", ALC880_LG),
SND_PCI_QUIRK(0x1854, 0x0068, "LG w1", ALC880_LG),
SND_PCI_QUIRK(0x1854, 0x0077, "LG LW25", ALC880_LG_LW),
SND_PCI_QUIRK(0x19db, 0x4188, "TCL S700", ALC880_TCL_S700),