X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=drivers%2Fblock%2Fnvme-core.c;h=be35b1d1885480c7910a839edaf923a12c1f5aa7;hb=201cf1ecdfe5ea2774cbb21d4214c98ec8b418de;hp=6f04771f1019798cc2feabf73eff2ddbadc84b81;hpb=0d8770815f70cf41b69a82ede272b026dbb2df7d;p=firefly-linux-kernel-4.4.55.git

diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 6f04771f1019..be35b1d18854 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1285,7 +1285,6 @@ static void nvme_abort_req(struct request *req)
 		list_del_init(&dev->node);
 		dev_warn(dev->dev, "I/O %d QID %d timeout, reset controller\n",
 							req->tag, nvmeq->qid);
-		dev->reset_workfn = nvme_reset_failed_dev;
 		queue_work(nvme_workq, &dev->reset_work);
  out:
 		spin_unlock_irqrestore(&dev_list_lock, flags);
@@ -1943,6 +1942,20 @@ static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
 #define nvme_compat_ioctl	NULL
 #endif
 
+static void nvme_free_dev(struct kref *kref);
+static void nvme_free_ns(struct kref *kref)
+{
+	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
+
+	spin_lock(&dev_list_lock);
+	ns->disk->private_data = NULL;
+	spin_unlock(&dev_list_lock);
+
+	kref_put(&ns->dev->kref, nvme_free_dev);
+	put_disk(ns->disk);
+	kfree(ns);
+}
+
 static int nvme_open(struct block_device *bdev, fmode_t mode)
 {
 	int ret = 0;
@@ -1952,21 +1965,17 @@ static int nvme_open(struct block_device *bdev, fmode_t mode)
 	ns = bdev->bd_disk->private_data;
 	if (!ns)
 		ret = -ENXIO;
-	else if (!kref_get_unless_zero(&ns->dev->kref))
+	else if (!kref_get_unless_zero(&ns->kref))
 		ret = -ENXIO;
 	spin_unlock(&dev_list_lock);
 
 	return ret;
 }
 
-static void nvme_free_dev(struct kref *kref);
-
 static void nvme_release(struct gendisk *disk, fmode_t mode)
 {
 	struct nvme_ns *ns = disk->private_data;
-	struct nvme_dev *dev = ns->dev;
-
-	kref_put(&dev->kref, nvme_free_dev);
+	kref_put(&ns->kref, nvme_free_ns);
 }
 
 static int nvme_getgeo(struct block_device *bd, struct hd_geometry *geo)
@@ -2079,7 +2088,6 @@ static int nvme_kthread(void *data)
 				dev_warn(dev->dev,
 					"Failed status: %x, reset controller\n",
 					readl(&dev->bar->csts));
-				dev->reset_workfn = nvme_reset_failed_dev;
 				queue_work(nvme_workq, &dev->reset_work);
 				continue;
 			}
@@ -2126,6 +2134,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
 	if (!disk)
 		goto out_free_queue;
 
+	kref_init(&ns->kref);
 	ns->ns_id = nsid;
 	ns->disk = disk;
 	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
@@ -2162,6 +2171,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
 	if (nvme_revalidate_disk(ns->disk))
 		goto out_free_disk;
 
+	kref_get(&dev->kref);
 	add_disk(ns->disk);
 	if (ns->ms) {
 		struct block_device *bd = bdget_disk(ns->disk, 0);
@@ -2357,18 +2367,6 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	return result;
 }
 
-static void nvme_free_namespace(struct nvme_ns *ns)
-{
-	list_del(&ns->list);
-
-	spin_lock(&dev_list_lock);
-	ns->disk->private_data = NULL;
-	spin_unlock(&dev_list_lock);
-
-	put_disk(ns->disk);
-	kfree(ns);
-}
-
 static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
 {
 	struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
@@ -2410,7 +2408,9 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 	if (kill || !blk_queue_dying(ns->queue)) {
 		blk_mq_abort_requeue_list(ns->queue);
 		blk_cleanup_queue(ns->queue);
-	}
+	}
+	list_del_init(&ns->list);
+	kref_put(&ns->kref, nvme_free_ns);
 }
 
 static void nvme_scan_namespaces(struct nvme_dev *dev, unsigned nn)
@@ -2421,18 +2421,14 @@ static void nvme_scan_namespaces(struct nvme_dev *dev, unsigned nn)
 	for (i = 1; i <= nn; i++) {
 		ns = nvme_find_ns(dev, i);
 		if (ns) {
-			if (revalidate_disk(ns->disk)) {
+			if (revalidate_disk(ns->disk))
 				nvme_ns_remove(ns);
-				nvme_free_namespace(ns);
-			}
 		} else
 			nvme_alloc_ns(dev, i);
 	}
 	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
-		if (ns->ns_id > nn) {
+		if (ns->ns_id > nn)
 			nvme_ns_remove(ns);
-			nvme_free_namespace(ns);
-		}
 	}
 	list_sort(NULL, &dev->namespaces, ns_cmp);
 }
@@ -2822,9 +2818,9 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
 
 static void nvme_dev_remove(struct nvme_dev *dev)
 {
-	struct nvme_ns *ns;
+	struct nvme_ns *ns, *next;
 
-	list_for_each_entry(ns, &dev->namespaces, list)
+	list_for_each_entry_safe(ns, next, &dev->namespaces, list)
 		nvme_ns_remove(ns);
 }
 
@@ -2880,21 +2876,12 @@ static void nvme_release_instance(struct nvme_dev *dev)
 	spin_unlock(&dev_list_lock);
 }
 
-static void nvme_free_namespaces(struct nvme_dev *dev)
-{
-	struct nvme_ns *ns, *next;
-
-	list_for_each_entry_safe(ns, next, &dev->namespaces, list)
-		nvme_free_namespace(ns);
-}
-
 static void nvme_free_dev(struct kref *kref)
 {
 	struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
 
 	put_device(dev->dev);
 	put_device(dev->device);
-	nvme_free_namespaces(dev);
 	nvme_release_instance(dev);
 	if (dev->tagset.tags)
 		blk_mq_free_tag_set(&dev->tagset);
@@ -3036,14 +3023,6 @@ static int nvme_remove_dead_ctrl(void *arg)
 	return 0;
 }
 
-static void nvme_remove_disks(struct work_struct *ws)
-{
-	struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
-
-	nvme_free_queues(dev, 1);
-	nvme_dev_remove(dev);
-}
-
 static int nvme_dev_resume(struct nvme_dev *dev)
 {
 	int ret;
@@ -3052,10 +3031,9 @@ static int nvme_dev_resume(struct nvme_dev *dev)
 	if (ret)
 		return ret;
 	if (dev->online_queues < 2) {
-		spin_lock(&dev_list_lock);
-		dev->reset_workfn = nvme_remove_disks;
-		queue_work(nvme_workq, &dev->reset_work);
-		spin_unlock(&dev_list_lock);
+		dev_warn(dev->dev, "IO queues not created\n");
+		nvme_free_queues(dev, 1);
+		nvme_dev_remove(dev);
 	} else {
 		nvme_unfreeze_queues(dev);
 		nvme_dev_add(dev);
@@ -3102,12 +3080,6 @@ static void nvme_reset_failed_dev(struct work_struct *ws)
 	nvme_dev_reset(dev);
 }
 
-static void nvme_reset_workfn(struct work_struct *work)
-{
-	struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
-	dev->reset_workfn(work);
-}
-
 static int nvme_reset(struct nvme_dev *dev)
 {
 	int ret = -EBUSY;
@@ -3117,7 +3089,7 @@ static int nvme_reset(struct nvme_dev *dev)
 	spin_lock(&dev_list_lock);
 	if (!work_pending(&dev->reset_work)) {
-		dev->reset_workfn = nvme_reset_failed_dev;
+		list_del_init(&dev->node);
 		queue_work(nvme_workq, &dev->reset_work);
 		ret = 0;
 	}
@@ -3170,8 +3142,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto free;
 
 	INIT_LIST_HEAD(&dev->namespaces);
-	dev->reset_workfn = nvme_reset_failed_dev;
-	INIT_WORK(&dev->reset_work, nvme_reset_workfn);
+	INIT_WORK(&dev->reset_work, nvme_reset_failed_dev);
 	dev->dev = get_device(&pdev->dev);
 	pci_set_drvdata(pdev, dev);
 	result = nvme_set_instance(dev);
@@ -3234,7 +3205,7 @@ static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
 	if (prepare)
 		nvme_dev_shutdown(dev);
 	else
-		nvme_dev_resume(dev);
+		schedule_work(&dev->probe_work);
 }
 
 static void nvme_shutdown(struct pci_dev *pdev)
@@ -3288,10 +3259,7 @@ static int nvme_resume(struct device *dev)
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct nvme_dev *ndev = pci_get_drvdata(pdev);
 
-	if (nvme_dev_resume(ndev) && !work_busy(&ndev->reset_work)) {
-		ndev->reset_workfn = nvme_reset_failed_dev;
-		queue_work(nvme_workq, &ndev->reset_work);
-	}
+	schedule_work(&ndev->probe_work);
 	return 0;
 }
 #endif