 	return result;
 }
-/*
- * NB: return value of non-zero would mean that we were a stacking driver.
- * make_request must always succeed.
- */
-static int nvme_make_request(struct request_queue *q, struct bio *bio)
+static void nvme_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct nvme_ns *ns = q->queuedata;
 	struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
 	spin_unlock_irq(&nvmeq->q_lock);
 	put_nvmeq(nvmeq);
-
-	return 0;
 }
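Aside (not part of the patch): with the block-layer change this hunk tracks,
make_request_fn returns void, so a driver can no longer report failure
through its return value; it has to complete the bio itself. A minimal
sketch of the new shape, where example_queue_bio is a hypothetical driver
helper returning nonzero on failure:

static void example_make_request(struct request_queue *q, struct bio *bio)
{
	if (example_queue_bio(q->queuedata, bio))
		bio_endio(bio, -EIO);	/* errors complete the bio... */
	/* ...instead of being returned to the block layer */
}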
 static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
 }
 static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
-				unsigned dword11, dma_addr_t dma_addr, u32 *result)
+				unsigned dword11, dma_addr_t dma_addr)
 {
 	struct nvme_command c;
 	c.features.fid = cpu_to_le32(fid);
 	c.features.dword11 = cpu_to_le32(dword11);
+	return nvme_submit_admin_cmd(dev, &c, NULL);
+}
+
+static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
+				unsigned dword11, dma_addr_t dma_addr, u32 *result)
+{
+	struct nvme_command c;
+
+	memset(&c, 0, sizeof(c));
+	c.features.opcode = nvme_admin_set_features;
+	c.features.prp1 = cpu_to_le64(dma_addr);
+	c.features.fid = cpu_to_le32(fid);
+	c.features.dword11 = cpu_to_le32(dword11);
+
 	return nvme_submit_admin_cmd(dev, &c, result);
 }
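Aside, a sketch of the intended division of labour (the caller and values
below are illustrative): Get Features now returns data only through the DMA
buffer, while Set Features keeps the result pointer so a caller can read
the controller's reply from completion dword 0.

static int example_feature_calls(struct nvme_dev *dev, dma_addr_t dma_addr)
{
	u32 result;
	u32 q_count = (2 - 1) | ((2 - 1) << 16);	/* ask for 2 of each, 0's based */
	int res;

	/* feature data for namespace 1 lands in the buffer at dma_addr */
	res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, 1, dma_addr);
	if (res)
		return res;

	/* the controller's grant comes back through 'result' */
	return nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
								&result);
}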
 		offset = 0;
 	}
 	sg_mark_end(&sg[i - 1]);
+	iod->nents = count;
 	err = -ENOMEM;
 	nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
 			write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 }
 static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
-			unsigned long addr, int length, struct nvme_iod *iod)
+			struct nvme_iod *iod)
 {
-	struct scatterlist *sg = iod->sg;
-	int i, count;
+	int i;
-	count = DIV_ROUND_UP(offset_in_page(addr) + length, PAGE_SIZE);
-	dma_unmap_sg(&dev->pci_dev->dev, sg, count, DMA_FROM_DEVICE);
+	dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-	for (i = 0; i < count; i++)
-		put_page(sg_page(&sg[i]));
+	for (i = 0; i < iod->nents; i++)
+		put_page(sg_page(&iod->sg[i]));
 }
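Aside: a sketch of the map/unmap pairing this enables, assuming
nvme_map_user_pages() keeps its existing (dev, write, addr, length)
signature and returns the iod. Because the iod now records its SG entry
count in iod->nents, teardown needs nothing beyond the iod itself:

static int example_user_transfer(struct nvme_dev *dev, int write,
				unsigned long addr, unsigned length)
{
	struct nvme_iod *iod = nvme_map_user_pages(dev, write, addr, length);

	if (IS_ERR(iod))
		return PTR_ERR(iod);

	/* ... build and submit a command using iod->sg ... */

	nvme_unmap_user_pages(dev, write, iod);	/* consults iod->nents */
	nvme_free_iod(dev, iod);
	return 0;
}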
 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	else
 		status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
-	nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, iod);
+	nvme_unmap_user_pages(dev, io.opcode & 1, iod);
 	nvme_free_iod(dev, iod);
 	return status;
 }
 	status = nvme_submit_admin_cmd(dev, &c, NULL);
 	if (cmd.data_len) {
-		nvme_unmap_user_pages(dev, cmd.opcode & 1, cmd.addr,
-				cmd.data_len, iod);
+		nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
 		nvme_free_iod(dev, iod);
 	}
 	return status;
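Aside (not stated in the patch): both call sites pass (opcode & 1) as the
write flag because NVMe data-transfer opcodes encode direction in bit 0,
e.g. nvme_cmd_write is 0x01 and nvme_cmd_read is 0x02. A sketch of the
mapping the DMA calls above rely on:

static enum dma_data_direction example_io_dir(u8 opcode)
{
	/* bit 0 set: host-to-controller; clear: controller-to-host */
	return (opcode & 1) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}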
 	ns->queue = blk_alloc_queue(GFP_KERNEL);
 	if (!ns->queue)
 		goto out_free_ns;
-	ns->queue->queue_flags = QUEUE_FLAG_DEFAULT | QUEUE_FLAG_NOMERGES |
-				QUEUE_FLAG_NONROT | QUEUE_FLAG_DISCARD;
+	ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
+	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
+	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
+/*	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); */
 	blk_queue_make_request(ns->queue, nvme_make_request);
 	ns->dev = dev;
 	ns->queue->queuedata = ns;
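Aside, my reading of why the flag setup changed (the patch itself does not
say): QUEUE_FLAG_DEFAULT is a ready-made mask, but QUEUE_FLAG_NOMERGES,
QUEUE_FLAG_NONROT and QUEUE_FLAG_DISCARD are bit numbers, so OR-ing them
into queue_flags sets the wrong bits; queue_flag_set_unlocked() shifts
each one into place via __set_bit(). A sketch of the distinction:

static void example_setup_flags(struct request_queue *q)
{
	q->queue_flags = QUEUE_FLAG_DEFAULT;		/* already a mask */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);	/* a bit number */
}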
 	u32 result;
 	u32 q_count = (count - 1) | ((count - 1) << 16);
-	status = nvme_get_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
+	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
 								&result);
 	if (status)
 		return -EIO;
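Aside: on success the controller's reply in 'result' packs the allocated
submission and completion queue counts (both 0's based) into the low and
high 16 bits, and the caller decodes it along these lines. The standalone
helper is illustrative:

static int example_decode_queue_count(u32 result)
{
	/* low 16 bits: SQs granted; high 16 bits: CQs (both 0's based) */
	return min(result & 0xffff, result >> 16) + 1;
}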
 			continue;
 		res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
-						dma_addr + 4096, NULL);
+						dma_addr + 4096);
 		if (res)
 			continue;
 MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
 MODULE_LICENSE("GPL");
-MODULE_VERSION("0.7");
+MODULE_VERSION("0.8");
 module_init(nvme_init);
 module_exit(nvme_exit);