iommu/tegra-gart: Make use of domain_alloc and domain_free
drivers/iommu/tegra-gart.c
/*
 * IOMMU API for GART in Tegra20
 *
 * Copyright (c) 2010-2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define pr_fmt(fmt)     "%s(): " fmt, __func__

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/of.h>

#include <asm/cacheflush.h>

/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES      (SZ_4K)

#define GART_REG_BASE           0x24
#define GART_CONFIG             (0x24 - GART_REG_BASE)
#define GART_ENTRY_ADDR         (0x28 - GART_REG_BASE)
#define GART_ENTRY_DATA         (0x2c - GART_REG_BASE)
#define GART_ENTRY_PHYS_ADDR_VALID      (1 << 31)

#define GART_PAGE_SHIFT         12
#define GART_PAGE_SIZE          (1 << GART_PAGE_SHIFT)
#define GART_PAGE_MASK                                          \
        (~(GART_PAGE_SIZE - 1) & ~GART_ENTRY_PHYS_ADDR_VALID)

struct gart_client {
        struct device           *dev;
        struct list_head        list;
};

struct gart_device {
        void __iomem            *regs;
        u32                     *savedata;
        u32                     page_count;     /* total remappable size */
        dma_addr_t              iovmm_base;     /* offset to vmm_area */
        spinlock_t              pte_lock;       /* for pagetable */
        struct list_head        client;
        spinlock_t              client_lock;    /* for client list */
        struct device           *dev;
};

struct gart_domain {
        struct iommu_domain domain;             /* generic domain handle */
        struct gart_device *gart;               /* link to gart device   */
};

static struct gart_device *gart_handle; /* unique for a system */

#define GART_PTE(_pfn)                                          \
        (GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))

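/*
 * Worked example (illustrative only; the value is chosen for this sketch):
 * a GART PTE is just bit 31 (valid) ORed with the 4 KiB-aligned page
 * address. For physical address 0x12345000, pfn = 0x12345, so
 *
 *      GART_PTE(0x12345) == (1 << 31) | (0x12345 << 12) == 0x92345000
 *
 * and (GART_PTE(pfn) & GART_PAGE_MASK) recovers 0x12345000.
 */
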
static struct gart_domain *to_gart_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct gart_domain, domain);
}

/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back to ensure the APB/AHB bus transaction is
 * complete before initiating activity on the PPSB block.
 */
#define FLUSH_GART_REGS(gart)   ((void)readl((gart)->regs + GART_CONFIG))

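/*
 * For example (mirroring the pattern used in gart_iommu_map() below),
 * a PTE update is always paired with a read-back before the mapping
 * is assumed to be live:
 *
 *      gart_set_pte(gart, iova, GART_PTE(pfn));
 *      FLUSH_GART_REGS(gart);
 */
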
#define for_each_gart_pte(gart, iova)                                   \
        for (iova = gart->iovmm_base;                                   \
             iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \
             iova += GART_PAGE_SIZE)

/*
 * The pagetable is accessed indirectly: write the IOVA to
 * GART_ENTRY_ADDR, then access the PTE through GART_ENTRY_DATA.
 * Callers hold pte_lock.
 */
static inline void gart_set_pte(struct gart_device *gart,
                                unsigned long offs, u32 pte)
{
        writel(offs, gart->regs + GART_ENTRY_ADDR);
        writel(pte, gart->regs + GART_ENTRY_DATA);

        dev_dbg(gart->dev, "%s %08lx:%08x\n",
                 pte ? "map" : "unmap", offs, pte & GART_PAGE_MASK);
}

static inline unsigned long gart_read_pte(struct gart_device *gart,
                                          unsigned long offs)
{
        unsigned long pte;

        writel(offs, gart->regs + GART_ENTRY_ADDR);
        pte = readl(gart->regs + GART_ENTRY_DATA);

        return pte;
}

/* Program every PTE (from @data, or clear if NULL) and enable the GART. */
static void do_gart_setup(struct gart_device *gart, const u32 *data)
{
        unsigned long iova;

        for_each_gart_pte(gart, iova)
                gart_set_pte(gart, iova, data ? *(data++) : 0);

        writel(1, gart->regs + GART_CONFIG);
        FLUSH_GART_REGS(gart);
}

#ifdef DEBUG
static void gart_dump_table(struct gart_device *gart)
{
        unsigned long iova;
        unsigned long flags;

        spin_lock_irqsave(&gart->pte_lock, flags);
        for_each_gart_pte(gart, iova) {
                unsigned long pte;

                pte = gart_read_pte(gart, iova);

                dev_dbg(gart->dev, "%s %08lx:%08lx\n",
                        (GART_ENTRY_PHYS_ADDR_VALID & pte) ? "v" : " ",
                        iova, pte & GART_PAGE_MASK);
        }
        spin_unlock_irqrestore(&gart->pte_lock, flags);
}
#else
static inline void gart_dump_table(struct gart_device *gart)
{
}
#endif

static inline bool gart_iova_range_valid(struct gart_device *gart,
                                         unsigned long iova, size_t bytes)
{
        unsigned long iova_start, iova_end, gart_start, gart_end;

        iova_start = iova;
        iova_end = iova_start + bytes - 1;
        gart_start = gart->iovmm_base;
        gart_end = gart_start + gart->page_count * GART_PAGE_SIZE - 1;

        if (iova_start < gart_start)
                return false;
        if (iova_end > gart_end)
                return false;
        return true;
}

static int gart_iommu_attach_dev(struct iommu_domain *domain,
                                 struct device *dev)
{
        struct gart_domain *gart_domain = to_gart_domain(domain);
        struct gart_device *gart;
        struct gart_client *client, *c;
        int err = 0;

        gart = gart_handle;
        if (!gart)
                return -EINVAL;
        gart_domain->gart = gart;

        domain->geometry.aperture_start = gart->iovmm_base;
        domain->geometry.aperture_end   = gart->iovmm_base +
                                        gart->page_count * GART_PAGE_SIZE - 1;
        domain->geometry.force_aperture = true;

        client = devm_kzalloc(gart->dev, sizeof(*client), GFP_KERNEL);
        if (!client)
                return -ENOMEM;
        client->dev = dev;

        spin_lock(&gart->client_lock);
        list_for_each_entry(c, &gart->client, list) {
                if (c->dev == dev) {
                        dev_err(gart->dev,
                                "%s is already attached\n", dev_name(dev));
                        err = -EINVAL;
                        goto fail;
                }
        }
        list_add(&client->list, &gart->client);
        spin_unlock(&gart->client_lock);
        dev_dbg(gart->dev, "Attached %s\n", dev_name(dev));
        return 0;

fail:
        devm_kfree(gart->dev, client);
        spin_unlock(&gart->client_lock);
        return err;
}

static void gart_iommu_detach_dev(struct iommu_domain *domain,
                                  struct device *dev)
{
        struct gart_domain *gart_domain = to_gart_domain(domain);
        struct gart_device *gart = gart_domain->gart;
        struct gart_client *c;

        spin_lock(&gart->client_lock);

        list_for_each_entry(c, &gart->client, list) {
                if (c->dev == dev) {
                        list_del(&c->list);
                        devm_kfree(gart->dev, c);
                        dev_dbg(gart->dev, "Detached %s\n", dev_name(dev));
                        goto out;
                }
        }
        dev_err(gart->dev, "Couldn't find %s to detach\n", dev_name(dev));
out:
        spin_unlock(&gart->client_lock);
}

static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
{
        struct gart_domain *gart_domain;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        gart_domain = kzalloc(sizeof(*gart_domain), GFP_KERNEL);
        if (!gart_domain)
                return NULL;

        return &gart_domain->domain;
}

static void gart_iommu_domain_free(struct iommu_domain *domain)
{
        struct gart_domain *gart_domain = to_gart_domain(domain);
        struct gart_device *gart = gart_domain->gart;

        if (gart) {
                struct gart_client *c, *tmp;

                /*
                 * Drop any clients still attached. Walk with the _safe
                 * iterator since each entry is freed as it is removed;
                 * calling gart_iommu_detach_dev() here would take
                 * client_lock recursively and deadlock.
                 */
                spin_lock(&gart->client_lock);
                list_for_each_entry_safe(c, tmp, &gart->client, list) {
                        list_del(&c->list);
                        devm_kfree(gart->dev, c);
                }
                spin_unlock(&gart->client_lock);
        }

        kfree(gart_domain);
}

static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
                          phys_addr_t pa, size_t bytes, int prot)
{
        struct gart_domain *gart_domain = to_gart_domain(domain);
        struct gart_device *gart = gart_domain->gart;
        unsigned long flags;
        unsigned long pfn;

        if (!gart_iova_range_valid(gart, iova, bytes))
                return -EINVAL;

        spin_lock_irqsave(&gart->pte_lock, flags);
        pfn = __phys_to_pfn(pa);
        if (!pfn_valid(pfn)) {
                dev_err(gart->dev, "Invalid page: %pa\n", &pa);
                spin_unlock_irqrestore(&gart->pte_lock, flags);
                return -EINVAL;
        }
        gart_set_pte(gart, iova, GART_PTE(pfn));
        FLUSH_GART_REGS(gart);
        spin_unlock_irqrestore(&gart->pte_lock, flags);
        return 0;
}

static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                               size_t bytes)
{
        struct gart_domain *gart_domain = to_gart_domain(domain);
        struct gart_device *gart = gart_domain->gart;
        unsigned long flags;

        if (!gart_iova_range_valid(gart, iova, bytes))
                return 0;

        spin_lock_irqsave(&gart->pte_lock, flags);
        gart_set_pte(gart, iova, 0);
        FLUSH_GART_REGS(gart);
        spin_unlock_irqrestore(&gart->pte_lock, flags);

        /* The IOMMU core expects the number of bytes actually unmapped. */
        return bytes;
}

static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
                                           dma_addr_t iova)
{
        struct gart_domain *gart_domain = to_gart_domain(domain);
        struct gart_device *gart = gart_domain->gart;
        unsigned long pte;
        phys_addr_t pa;
        unsigned long flags;

        if (!gart_iova_range_valid(gart, iova, 0))
                return -EINVAL;

        spin_lock_irqsave(&gart->pte_lock, flags);
        pte = gart_read_pte(gart, iova);
        spin_unlock_irqrestore(&gart->pte_lock, flags);

        pa = (pte & GART_PAGE_MASK);
        if (!pfn_valid(__phys_to_pfn(pa))) {
                dev_err(gart->dev, "No entry for %08llx:%pa\n",
                         (unsigned long long)iova, &pa);
                gart_dump_table(gart);
                return -EINVAL;
        }
        return pa;
}

static bool gart_iommu_capable(enum iommu_cap cap)
{
        return false;
}

static const struct iommu_ops gart_iommu_ops = {
        .capable        = gart_iommu_capable,
        .domain_alloc   = gart_iommu_domain_alloc,
        .domain_free    = gart_iommu_domain_free,
        .attach_dev     = gart_iommu_attach_dev,
        .detach_dev     = gart_iommu_detach_dev,
        .map            = gart_iommu_map,
        .map_sg         = default_iommu_map_sg,
        .unmap          = gart_iommu_unmap,
        .iova_to_phys   = gart_iommu_iova_to_phys,
        .pgsize_bitmap  = GART_IOMMU_PGSIZES,
};

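/*
 * Usage sketch (illustrative only, not part of this driver): with
 * domain_alloc/domain_free wired up, a client driver reaches these
 * callbacks through the generic IOMMU API. Error handling is elided
 * and "dev" is assumed to be a device sitting behind the GART:
 *
 *      struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *
 *      if (domain && !iommu_attach_device(domain, dev)) {
 *              iommu_map(domain, iova, phys, SZ_4K,
 *                        IOMMU_READ | IOMMU_WRITE);
 *              ...
 *              iommu_unmap(domain, iova, SZ_4K);
 *              iommu_detach_device(domain, dev);
 *      }
 *      iommu_domain_free(domain);
 */
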
static int tegra_gart_suspend(struct device *dev)
{
        struct gart_device *gart = dev_get_drvdata(dev);
        unsigned long iova;
        u32 *data = gart->savedata;
        unsigned long flags;

        spin_lock_irqsave(&gart->pte_lock, flags);
        for_each_gart_pte(gart, iova)
                *(data++) = gart_read_pte(gart, iova);
        spin_unlock_irqrestore(&gart->pte_lock, flags);
        return 0;
}

static int tegra_gart_resume(struct device *dev)
{
        struct gart_device *gart = dev_get_drvdata(dev);
        unsigned long flags;

        spin_lock_irqsave(&gart->pte_lock, flags);
        do_gart_setup(gart, gart->savedata);
        spin_unlock_irqrestore(&gart->pte_lock, flags);
        return 0;
}

static int tegra_gart_probe(struct platform_device *pdev)
{
        struct gart_device *gart;
        struct resource *res, *res_remap;
        void __iomem *gart_regs;
        struct device *dev = &pdev->dev;

        if (gart_handle)
                return -EIO;

        BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);

        /* the GART memory aperture is required */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        res_remap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (!res || !res_remap) {
                dev_err(dev, "GART memory aperture expected\n");
                return -ENXIO;
        }

        gart = devm_kzalloc(dev, sizeof(*gart), GFP_KERNEL);
        if (!gart) {
                dev_err(dev, "failed to allocate gart_device\n");
                return -ENOMEM;
        }

        gart_regs = devm_ioremap(dev, res->start, resource_size(res));
        if (!gart_regs) {
                dev_err(dev, "failed to remap GART registers\n");
                return -ENXIO;
        }

        gart->dev = &pdev->dev;
        spin_lock_init(&gart->pte_lock);
        spin_lock_init(&gart->client_lock);
        INIT_LIST_HEAD(&gart->client);
        gart->regs = gart_regs;
        gart->iovmm_base = (dma_addr_t)res_remap->start;
        gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT);

        gart->savedata = vmalloc(sizeof(u32) * gart->page_count);
        if (!gart->savedata) {
                dev_err(dev, "failed to allocate context save area\n");
                return -ENOMEM;
        }

        platform_set_drvdata(pdev, gart);
        do_gart_setup(gart, NULL);

        gart_handle = gart;

        return 0;
}

static int tegra_gart_remove(struct platform_device *pdev)
{
        struct gart_device *gart = platform_get_drvdata(pdev);

        writel(0, gart->regs + GART_CONFIG);
        vfree(gart->savedata);  /* vfree() tolerates NULL */
        gart_handle = NULL;
        return 0;
}

static const struct dev_pm_ops tegra_gart_pm_ops = {
        .suspend        = tegra_gart_suspend,
        .resume         = tegra_gart_resume,
};

static const struct of_device_id tegra_gart_of_match[] = {
        { .compatible = "nvidia,tegra20-gart", },
        { },
};
MODULE_DEVICE_TABLE(of, tegra_gart_of_match);
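
/*
 * Illustrative device tree node (in the style of tegra20.dtsi; treat the
 * addresses as an example, not normative): the first reg entry is the
 * controller's register window, the second the aperture it remaps:
 *
 *      gart: gart@7000f024 {
 *              compatible = "nvidia,tegra20-gart";
 *              reg = <0x7000f024 0x00000018
 *                     0x58000000 0x02000000>;
 *      };
 */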

static struct platform_driver tegra_gart_driver = {
        .probe          = tegra_gart_probe,
        .remove         = tegra_gart_remove,
        .driver = {
                .name   = "tegra-gart",
                .pm     = &tegra_gart_pm_ops,
                .of_match_table = tegra_gart_of_match,
        },
};

static int __init tegra_gart_init(void)
{
        return platform_driver_register(&tegra_gart_driver);
}

static void __exit tegra_gart_exit(void)
{
        platform_driver_unregister(&tegra_gart_driver);
}

subsys_initcall(tegra_gart_init);
module_exit(tegra_gart_exit);

MODULE_DESCRIPTION("IOMMU API for GART in Tegra20");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
MODULE_ALIAS("platform:tegra-gart");
MODULE_LICENSE("GPL v2");