/*
 * IOMMU API for SMMU in Tegra30
 *
 * Copyright (c) 2011-2013, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define pr_fmt(fmt)     "%s(): " fmt, __func__

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/device.h>
#include <linux/sched.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/tegra-ahb.h>

#include <asm/page.h>
#include <asm/cacheflush.h>

enum smmu_hwgrp {
        HWGRP_AFI,
        HWGRP_AVPC,
        HWGRP_DC,
        HWGRP_DCB,
        HWGRP_EPP,
        HWGRP_G2,
        HWGRP_HC,
        HWGRP_HDA,
        HWGRP_ISP,
        HWGRP_MPE,
        HWGRP_NV,
        HWGRP_NV2,
        HWGRP_PPCS,
        HWGRP_SATA,
        HWGRP_VDE,
        HWGRP_VI,

        HWGRP_COUNT,

        HWGRP_END = ~0,
};

#define HWG_AFI         (1 << HWGRP_AFI)
#define HWG_AVPC        (1 << HWGRP_AVPC)
#define HWG_DC          (1 << HWGRP_DC)
#define HWG_DCB         (1 << HWGRP_DCB)
#define HWG_EPP         (1 << HWGRP_EPP)
#define HWG_G2          (1 << HWGRP_G2)
#define HWG_HC          (1 << HWGRP_HC)
#define HWG_HDA         (1 << HWGRP_HDA)
#define HWG_ISP         (1 << HWGRP_ISP)
#define HWG_MPE         (1 << HWGRP_MPE)
#define HWG_NV          (1 << HWGRP_NV)
#define HWG_NV2         (1 << HWGRP_NV2)
#define HWG_PPCS        (1 << HWGRP_PPCS)
#define HWG_SATA        (1 << HWGRP_SATA)
#define HWG_VDE         (1 << HWGRP_VDE)
#define HWG_VI          (1 << HWGRP_VI)

/* bitmap of the page sizes currently supported */
#define SMMU_IOMMU_PGSIZES      (SZ_4K)

#define SMMU_CONFIG                             0x10
#define SMMU_CONFIG_DISABLE                     0
#define SMMU_CONFIG_ENABLE                      1

/* REVISIT: To support multiple MCs */
enum {
        _MC = 0,
};

enum {
        _TLB = 0,
        _PTC,
};

#define SMMU_CACHE_CONFIG_BASE                  0x14
#define __SMMU_CACHE_CONFIG(mc, cache)          (SMMU_CACHE_CONFIG_BASE + 4 * (cache))
#define SMMU_CACHE_CONFIG(cache)                __SMMU_CACHE_CONFIG(_MC, cache)

#define SMMU_CACHE_CONFIG_STATS_SHIFT           31
#define SMMU_CACHE_CONFIG_STATS_ENABLE          (1 << SMMU_CACHE_CONFIG_STATS_SHIFT)
#define SMMU_CACHE_CONFIG_STATS_TEST_SHIFT      30
#define SMMU_CACHE_CONFIG_STATS_TEST            (1 << SMMU_CACHE_CONFIG_STATS_TEST_SHIFT)

#define SMMU_TLB_CONFIG_HIT_UNDER_MISS__ENABLE  (1 << 29)
#define SMMU_TLB_CONFIG_ACTIVE_LINES__VALUE     0x10
#define SMMU_TLB_CONFIG_RESET_VAL               0x20000010

#define SMMU_PTC_CONFIG_CACHE__ENABLE           (1 << 29)
#define SMMU_PTC_CONFIG_INDEX_MAP__PATTERN      0x3f
#define SMMU_PTC_CONFIG_RESET_VAL               0x2000003f

#define SMMU_PTB_ASID                           0x1c
#define SMMU_PTB_ASID_CURRENT_SHIFT             0

#define SMMU_PTB_DATA                           0x20
#define SMMU_PTB_DATA_RESET_VAL                 0
#define SMMU_PTB_DATA_ASID_NONSECURE_SHIFT      29
#define SMMU_PTB_DATA_ASID_WRITABLE_SHIFT       30
#define SMMU_PTB_DATA_ASID_READABLE_SHIFT       31

#define SMMU_TLB_FLUSH                          0x30
#define SMMU_TLB_FLUSH_VA_MATCH_ALL             0
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION         2
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP           3
#define SMMU_TLB_FLUSH_ASID_SHIFT               29
#define SMMU_TLB_FLUSH_ASID_MATCH_DISABLE       0
#define SMMU_TLB_FLUSH_ASID_MATCH_ENABLE        1
#define SMMU_TLB_FLUSH_ASID_MATCH_SHIFT         31

#define SMMU_PTC_FLUSH                          0x34
#define SMMU_PTC_FLUSH_TYPE_ALL                 0
#define SMMU_PTC_FLUSH_TYPE_ADR                 1
#define SMMU_PTC_FLUSH_ADR_SHIFT                4

#define SMMU_ASID_SECURITY                      0x38

#define SMMU_STATS_CACHE_COUNT_BASE             0x1f0

#define SMMU_STATS_CACHE_COUNT(mc, cache, hitmiss)              \
        (SMMU_STATS_CACHE_COUNT_BASE + 8 * (cache) + 4 * (hitmiss))

#define SMMU_TRANSLATION_ENABLE_0               0x228
#define SMMU_TRANSLATION_ENABLE_1               0x22c
#define SMMU_TRANSLATION_ENABLE_2               0x230

#define SMMU_AFI_ASID   0x238   /* PCIE */
#define SMMU_AVPC_ASID  0x23c   /* AVP */
#define SMMU_DC_ASID    0x240   /* Display controller */
#define SMMU_DCB_ASID   0x244   /* Display controller B */
#define SMMU_EPP_ASID   0x248   /* Encoder pre-processor */
#define SMMU_G2_ASID    0x24c   /* 2D engine */
#define SMMU_HC_ASID    0x250   /* Host1x */
#define SMMU_HDA_ASID   0x254   /* High-def audio */
#define SMMU_ISP_ASID   0x258   /* Image signal processor */
#define SMMU_MPE_ASID   0x264   /* MPEG encoder */
#define SMMU_NV_ASID    0x268   /* (3D) */
#define SMMU_NV2_ASID   0x26c   /* (3D) */
#define SMMU_PPCS_ASID  0x270   /* AHB */
#define SMMU_SATA_ASID  0x278   /* SATA */
#define SMMU_VDE_ASID   0x27c   /* Video decoder */
#define SMMU_VI_ASID    0x280   /* Video input */

#define SMMU_PDE_NEXT_SHIFT             28

#define SMMU_TLB_FLUSH_VA_SECTION__MASK         0xffc00000
#define SMMU_TLB_FLUSH_VA_SECTION__SHIFT        12 /* right shift */
#define SMMU_TLB_FLUSH_VA_GROUP__MASK           0xffffc000
#define SMMU_TLB_FLUSH_VA_GROUP__SHIFT          12 /* right shift */
#define SMMU_TLB_FLUSH_VA(iova, which)  \
        ((((iova) & SMMU_TLB_FLUSH_VA_##which##__MASK) >> \
                SMMU_TLB_FLUSH_VA_##which##__SHIFT) |   \
        SMMU_TLB_FLUSH_VA_MATCH_##which)
#define SMMU_PTB_ASID_CUR(n)    \
                ((n) << SMMU_PTB_ASID_CURRENT_SHIFT)
#define SMMU_TLB_FLUSH_ASID_MATCH__DISABLE              \
                (SMMU_TLB_FLUSH_ASID_MATCH_DISABLE <<   \
                        SMMU_TLB_FLUSH_ASID_MATCH_SHIFT)
#define SMMU_TLB_FLUSH_ASID_MATCH__ENABLE               \
                (SMMU_TLB_FLUSH_ASID_MATCH_ENABLE <<    \
                        SMMU_TLB_FLUSH_ASID_MATCH_SHIFT)

#define SMMU_PAGE_SHIFT 12
#define SMMU_PAGE_SIZE  (1 << SMMU_PAGE_SHIFT)
#define SMMU_PAGE_MASK  ((1 << SMMU_PAGE_SHIFT) - 1)

#define SMMU_PDIR_COUNT 1024
#define SMMU_PDIR_SIZE  (sizeof(unsigned long) * SMMU_PDIR_COUNT)
#define SMMU_PTBL_COUNT 1024
#define SMMU_PTBL_SIZE  (sizeof(unsigned long) * SMMU_PTBL_COUNT)
#define SMMU_PDIR_SHIFT 12
#define SMMU_PDE_SHIFT  12
#define SMMU_PTE_SHIFT  12
#define SMMU_PFN_MASK   0x000fffff

#define SMMU_ADDR_TO_PFN(addr)  ((addr) >> 12)
#define SMMU_ADDR_TO_PDN(addr)  ((addr) >> 22)
#define SMMU_PDN_TO_ADDR(pdn)   ((pdn) << 22)

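/*
 * Address decomposition (informative): with 4 KiB pages and 1024-entry
 * tables, a 32-bit iova splits into
 *
 *      pdn    = iova[31:22]    page-directory index (SMMU_ADDR_TO_PDN)
 *      ptn    = iova[21:12]    page-table index
 *                              (SMMU_ADDR_TO_PFN modulo SMMU_PTBL_COUNT)
 *      offset = iova[11:0]     byte offset within the page
 *
 * e.g. iova 0x40001000 -> pdn 0x100, ptn 0x001, offset 0x000.
 */
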
#define _READABLE       (1 << SMMU_PTB_DATA_ASID_READABLE_SHIFT)
#define _WRITABLE       (1 << SMMU_PTB_DATA_ASID_WRITABLE_SHIFT)
#define _NONSECURE      (1 << SMMU_PTB_DATA_ASID_NONSECURE_SHIFT)
#define _PDE_NEXT       (1 << SMMU_PDE_NEXT_SHIFT)
#define _MASK_ATTR      (_READABLE | _WRITABLE | _NONSECURE)

#define _PDIR_ATTR      (_READABLE | _WRITABLE | _NONSECURE)

#define _PDE_ATTR       (_READABLE | _WRITABLE | _NONSECURE)
#define _PDE_ATTR_N     (_PDE_ATTR | _PDE_NEXT)
#define _PDE_VACANT(pdn)        (((pdn) << 10) | _PDE_ATTR)

#define _PTE_ATTR       (_READABLE | _WRITABLE | _NONSECURE)
#define _PTE_VACANT(addr)       (((addr) >> SMMU_PAGE_SHIFT) | _PTE_ATTR)

#define SMMU_MK_PDIR(page, attr)        \
                ((page_to_phys(page) >> SMMU_PDIR_SHIFT) | (attr))
#define SMMU_MK_PDE(page, attr)         \
                (unsigned long)((page_to_phys(page) >> SMMU_PDE_SHIFT) | (attr))
#define SMMU_EX_PTBL_PAGE(pde)          \
                pfn_to_page((unsigned long)(pde) & SMMU_PFN_MASK)
#define SMMU_PFN_TO_PTE(pfn, attr)      (unsigned long)((pfn) | (attr))

#define SMMU_ASID_ENABLE(asid)  ((asid) | (1 << 31))
#define SMMU_ASID_DISABLE       0
#define SMMU_ASID_ASID(n)       ((n) & ~SMMU_ASID_ENABLE(0))

#define NUM_SMMU_REG_BANKS      3

#define smmu_client_enable_hwgrp(c, m)  smmu_client_set_hwgrp(c, m, 1)
#define smmu_client_disable_hwgrp(c)    smmu_client_set_hwgrp(c, 0, 0)
#define __smmu_client_enable_hwgrp(c, m) __smmu_client_set_hwgrp(c, m, 1)
#define __smmu_client_disable_hwgrp(c)  __smmu_client_set_hwgrp(c, 0, 0)

#define HWGRP_INIT(client) [HWGRP_##client] = SMMU_##client##_ASID

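/*
 * HWGRP_INIT(AFI), for instance, expands to [HWGRP_AFI] = SMMU_AFI_ASID,
 * so the table below maps each hardware client group to the offset of
 * its ASID register.
 */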
static const u32 smmu_hwgrp_asid_reg[] = {
        HWGRP_INIT(AFI),
        HWGRP_INIT(AVPC),
        HWGRP_INIT(DC),
        HWGRP_INIT(DCB),
        HWGRP_INIT(EPP),
        HWGRP_INIT(G2),
        HWGRP_INIT(HC),
        HWGRP_INIT(HDA),
        HWGRP_INIT(ISP),
        HWGRP_INIT(MPE),
        HWGRP_INIT(NV),
        HWGRP_INIT(NV2),
        HWGRP_INIT(PPCS),
        HWGRP_INIT(SATA),
        HWGRP_INIT(VDE),
        HWGRP_INIT(VI),
};
#define HWGRP_ASID_REG(x) (smmu_hwgrp_asid_reg[x])

/*
 * Per-client data for an address space
 */
struct smmu_client {
        struct device           *dev;
        struct list_head        list;
        struct smmu_as          *as;
        u32                     hwgrp;
};

/*
 * Per-address-space data
 */
struct smmu_as {
        struct smmu_device      *smmu;  /* back pointer to container */
        unsigned int            asid;
        spinlock_t              lock;   /* for pagetable */
        struct page             *pdir_page;
        unsigned long           pdir_attr;
        unsigned long           pde_attr;
        unsigned long           pte_attr;
        unsigned int            *pte_count;

        struct list_head        client;
        spinlock_t              client_lock; /* for client list */
};

struct smmu_debugfs_info {
        struct smmu_device *smmu;
        int mc;
        int cache;
};

/*
 * Per-SMMU-device data - one IOMMU device
 */
struct smmu_device {
        void __iomem    *regbase;       /* register offset base */
        void __iomem    **regs;         /* register block start address array */
        void __iomem    **rege;         /* register block end address array */
        int             nregs;          /* number of register blocks */

        unsigned long   iovmm_base;     /* remappable base address */
        unsigned long   page_count;     /* total remappable size */
        spinlock_t      lock;
        char            *name;
        struct device   *dev;
        struct page *avp_vector_page;   /* dummy page shared by all AS's */

        /*
         * Register images saved across suspend/resume
         */
        unsigned long translation_enable_0;
        unsigned long translation_enable_1;
        unsigned long translation_enable_2;
        unsigned long asid_security;

        struct dentry *debugfs_root;
        struct smmu_debugfs_info *debugfs_info;

        struct device_node *ahb;

        int             num_as;
        struct smmu_as  as[0];          /* Run-time allocated array */
};

static struct smmu_device *smmu_handle; /* unique for a system */

/*
 *      SMMU register accessors
 */
static inline u32 smmu_read(struct smmu_device *smmu, size_t offs)
{
        int i;

        for (i = 0; i < smmu->nregs; i++) {
                void __iomem *addr = smmu->regbase + offs;

                BUG_ON(addr < smmu->regs[i]);
                if (addr <= smmu->rege[i])
                        return readl(addr);
        }

        BUG();
}

static inline void smmu_write(struct smmu_device *smmu, u32 val, size_t offs)
{
        int i;

        for (i = 0; i < smmu->nregs; i++) {
                void __iomem *addr = smmu->regbase + offs;

                BUG_ON(addr < smmu->regs[i]);
                if (addr <= smmu->rege[i]) {
                        writel(val, addr);
                        return;
                }
        }

        BUG();
}

#define VA_PAGE_TO_PA(va, page) \
        (page_to_phys(page) + ((unsigned long)(va) & ~PAGE_MASK))

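/*
 * Clean page table updates out of the CPU data cache (by virtual
 * address) and the outer cache (by physical address) so that the
 * SMMU's page table walks see up-to-date memory.
 */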
#define FLUSH_CPU_DCACHE(va, page, size)        \
        do {    \
                unsigned long _pa_ = VA_PAGE_TO_PA(va, page);           \
                __cpuc_flush_dcache_area((void *)(va), (size_t)(size)); \
                outer_flush_range(_pa_, _pa_+(size_t)(size));           \
        } while (0)

/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back barriers to ensure the APB/AHB bus
 * transaction is complete before initiating activity on the PPSB
 * block.
 */
#define FLUSH_SMMU_REGS(smmu)   smmu_read(smmu, SMMU_CONFIG)

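/*
 * Typical pattern (sketch): a register write followed by a read-back,
 * so the write is ordered before any later PPSB activity:
 *
 *      smmu_write(smmu, val, SMMU_TLB_FLUSH);
 *      FLUSH_SMMU_REGS(smmu);
 */
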
#define smmu_client_hwgrp(c) (u32)((c)->dev->platform_data)

static int __smmu_client_set_hwgrp(struct smmu_client *c,
                                   unsigned long map, int on)
{
        int i;
        struct smmu_as *as = c->as;
        u32 val, offs, mask = SMMU_ASID_ENABLE(as->asid);
        struct smmu_device *smmu = as->smmu;

        WARN_ON(!on && map);
        if (on && !map)
                return -EINVAL;
        if (!on)
                map = smmu_client_hwgrp(c);

        for_each_set_bit(i, &map, HWGRP_COUNT) {
                offs = HWGRP_ASID_REG(i);
                val = smmu_read(smmu, offs);
                if (on) {
                        if (WARN_ON(val & mask))
                                goto err_hw_busy;
                        val |= mask;
                } else {
                        WARN_ON((val & mask) == mask);
                        val &= ~mask;
                }
                smmu_write(smmu, val, offs);
        }
        FLUSH_SMMU_REGS(smmu);
        c->hwgrp = map;
        return 0;

err_hw_busy:
        for_each_set_bit(i, &map, HWGRP_COUNT) {
                offs = HWGRP_ASID_REG(i);
                val = smmu_read(smmu, offs);
                val &= ~mask;
                smmu_write(smmu, val, offs);
        }
        return -EBUSY;
}

static int smmu_client_set_hwgrp(struct smmu_client *c, u32 map, int on)
{
        int err;
        unsigned long flags;
        struct smmu_as *as = c->as;
        struct smmu_device *smmu = as->smmu;

        spin_lock_irqsave(&smmu->lock, flags);
        err = __smmu_client_set_hwgrp(c, map, on);
        spin_unlock_irqrestore(&smmu->lock, flags);
        return err;
}

/*
 * Flush all TLB entries and all PTC entries
 * Caller must lock smmu
 */
static void smmu_flush_regs(struct smmu_device *smmu, int enable)
{
        u32 val;

        smmu_write(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
        FLUSH_SMMU_REGS(smmu);
        val = SMMU_TLB_FLUSH_VA_MATCH_ALL |
                SMMU_TLB_FLUSH_ASID_MATCH__DISABLE;
        smmu_write(smmu, val, SMMU_TLB_FLUSH);

        if (enable)
                smmu_write(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
        FLUSH_SMMU_REGS(smmu);
}

static int smmu_setup_regs(struct smmu_device *smmu)
{
        int i;
        u32 val;

        for (i = 0; i < smmu->num_as; i++) {
                struct smmu_as *as = &smmu->as[i];
                struct smmu_client *c;

                smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
                val = as->pdir_page ?
                        SMMU_MK_PDIR(as->pdir_page, as->pdir_attr) :
                        SMMU_PTB_DATA_RESET_VAL;
                smmu_write(smmu, val, SMMU_PTB_DATA);

                list_for_each_entry(c, &as->client, list)
                        __smmu_client_set_hwgrp(c, c->hwgrp, 1);
        }

        smmu_write(smmu, smmu->translation_enable_0, SMMU_TRANSLATION_ENABLE_0);
        smmu_write(smmu, smmu->translation_enable_1, SMMU_TRANSLATION_ENABLE_1);
        smmu_write(smmu, smmu->translation_enable_2, SMMU_TRANSLATION_ENABLE_2);
        smmu_write(smmu, smmu->asid_security, SMMU_ASID_SECURITY);
        smmu_write(smmu, SMMU_TLB_CONFIG_RESET_VAL, SMMU_CACHE_CONFIG(_TLB));
        smmu_write(smmu, SMMU_PTC_CONFIG_RESET_VAL, SMMU_CACHE_CONFIG(_PTC));

        smmu_flush_regs(smmu, 1);

        return tegra_ahb_enable_smmu(smmu->ahb);
}

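/*
 * Invalidate the PTC entry caching the given PTE (or PDE), then the TLB
 * entries covering the iova: a whole section for a PDE, a group for a
 * PTE.  The PTC is flushed first so that a subsequent TLB refill cannot
 * pick up the stale page table entry from the PTC.
 */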
static void flush_ptc_and_tlb(struct smmu_device *smmu,
                      struct smmu_as *as, dma_addr_t iova,
                      unsigned long *pte, struct page *page, int is_pde)
{
        u32 val;
        unsigned long tlb_flush_va = is_pde
                ?  SMMU_TLB_FLUSH_VA(iova, SECTION)
                :  SMMU_TLB_FLUSH_VA(iova, GROUP);

        val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pte, page);
        smmu_write(smmu, val, SMMU_PTC_FLUSH);
        FLUSH_SMMU_REGS(smmu);
        val = tlb_flush_va |
                SMMU_TLB_FLUSH_ASID_MATCH__ENABLE |
                (as->asid << SMMU_TLB_FLUSH_ASID_SHIFT);
        smmu_write(smmu, val, SMMU_TLB_FLUSH);
        FLUSH_SMMU_REGS(smmu);
}

static void free_ptbl(struct smmu_as *as, dma_addr_t iova)
{
        unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
        unsigned long *pdir = (unsigned long *)page_address(as->pdir_page);

        if (pdir[pdn] != _PDE_VACANT(pdn)) {
                dev_dbg(as->smmu->dev, "pdn: %lx\n", pdn);

                ClearPageReserved(SMMU_EX_PTBL_PAGE(pdir[pdn]));
                __free_page(SMMU_EX_PTBL_PAGE(pdir[pdn]));
                pdir[pdn] = _PDE_VACANT(pdn);
                FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
                flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
                                  as->pdir_page, 1);
        }
}

static void free_pdir(struct smmu_as *as)
{
        unsigned addr;
        int count;
        struct device *dev = as->smmu->dev;

        if (!as->pdir_page)
                return;

        addr = as->smmu->iovmm_base;
        count = as->smmu->page_count;
        while (count-- > 0) {
                free_ptbl(as, addr);
                addr += SMMU_PAGE_SIZE * SMMU_PTBL_COUNT;
        }
        ClearPageReserved(as->pdir_page);
        __free_page(as->pdir_page);
        as->pdir_page = NULL;
        devm_kfree(dev, as->pte_count);
        as->pte_count = NULL;
}

/*
 * Looks up the page table for the given iova and returns the address of
 * the PTE.  The page backing the page table is returned in
 * *ptbl_page_p.  If @allocate is true, a vacant PDE is filled with a
 * newly allocated page table.
 */
static unsigned long *locate_pte(struct smmu_as *as,
                                 dma_addr_t iova, bool allocate,
                                 struct page **ptbl_page_p,
                                 unsigned int **count)
{
        unsigned long ptn = SMMU_ADDR_TO_PFN(iova);
        unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
        unsigned long *pdir = page_address(as->pdir_page);
        unsigned long *ptbl;

        if (pdir[pdn] != _PDE_VACANT(pdn)) {
                /* Mapped entry table already exists */
                *ptbl_page_p = SMMU_EX_PTBL_PAGE(pdir[pdn]);
                ptbl = page_address(*ptbl_page_p);
        } else if (!allocate) {
                return NULL;
        } else {
                int pn;
                unsigned long addr = SMMU_PDN_TO_ADDR(pdn);

                /* Vacant - allocate a new page table */
                dev_dbg(as->smmu->dev, "New PTBL pdn: %lx\n", pdn);

                *ptbl_page_p = alloc_page(GFP_ATOMIC);
                if (!*ptbl_page_p) {
                        dev_err(as->smmu->dev,
                                "failed to allocate smmu_device page table\n");
                        return NULL;
                }
                SetPageReserved(*ptbl_page_p);
                ptbl = (unsigned long *)page_address(*ptbl_page_p);
                for (pn = 0; pn < SMMU_PTBL_COUNT;
                     pn++, addr += SMMU_PAGE_SIZE) {
                        ptbl[pn] = _PTE_VACANT(addr);
                }
                FLUSH_CPU_DCACHE(ptbl, *ptbl_page_p, SMMU_PTBL_SIZE);
                pdir[pdn] = SMMU_MK_PDE(*ptbl_page_p,
                                        as->pde_attr | _PDE_NEXT);
                FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
                flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
                                  as->pdir_page, 1);
        }
        *count = &as->pte_count[pdn];

        return &ptbl[ptn % SMMU_PTBL_COUNT];
}

#ifdef CONFIG_SMMU_SIG_DEBUG
static void put_signature(struct smmu_as *as,
                          dma_addr_t iova, unsigned long pfn)
{
        struct page *page;
        unsigned long *vaddr;

        page = pfn_to_page(pfn);
        vaddr = page_address(page);
        if (!vaddr)
                return;

        vaddr[0] = iova;
        vaddr[1] = pfn << PAGE_SHIFT;
        FLUSH_CPU_DCACHE(vaddr, page, sizeof(vaddr[0]) * 2);
}
#else
static inline void put_signature(struct smmu_as *as,
                                 dma_addr_t iova, unsigned long pfn)
{
}
#endif

/*
 * Caller must not hold as->lock
 */
static int alloc_pdir(struct smmu_as *as)
{
        unsigned long *pdir, flags;
        int pdn, err = 0;
        u32 val;
        struct smmu_device *smmu = as->smmu;
        struct page *page;
        unsigned int *cnt;

        /*
         * do the allocation, then grab as->lock
         */
        cnt = devm_kzalloc(smmu->dev,
                           sizeof(cnt[0]) * SMMU_PDIR_COUNT,
                           GFP_KERNEL);
        page = alloc_page(GFP_KERNEL | __GFP_DMA);

        spin_lock_irqsave(&as->lock, flags);

        if (as->pdir_page) {
                /* We raced, free the redundant */
                err = -EAGAIN;
                goto err_out;
        }

        if (!page || !cnt) {
                dev_err(smmu->dev, "failed to allocate at %s\n", __func__);
                err = -ENOMEM;
                goto err_out;
        }

        as->pdir_page = page;
        as->pte_count = cnt;

        SetPageReserved(as->pdir_page);
        pdir = page_address(as->pdir_page);

        for (pdn = 0; pdn < SMMU_PDIR_COUNT; pdn++)
                pdir[pdn] = _PDE_VACANT(pdn);
        FLUSH_CPU_DCACHE(pdir, as->pdir_page, SMMU_PDIR_SIZE);
        val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pdir, as->pdir_page);
        smmu_write(smmu, val, SMMU_PTC_FLUSH);
        FLUSH_SMMU_REGS(as->smmu);
        val = SMMU_TLB_FLUSH_VA_MATCH_ALL |
                SMMU_TLB_FLUSH_ASID_MATCH__ENABLE |
                (as->asid << SMMU_TLB_FLUSH_ASID_SHIFT);
        smmu_write(smmu, val, SMMU_TLB_FLUSH);
        FLUSH_SMMU_REGS(as->smmu);

        spin_unlock_irqrestore(&as->lock, flags);

        return 0;

err_out:
        spin_unlock_irqrestore(&as->lock, flags);

        if (cnt)
                devm_kfree(smmu->dev, cnt);
        if (page)
                __free_page(page);
        return err;
}

static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova)
{
        unsigned long *pte;
        struct page *page;
        unsigned int *count;

        pte = locate_pte(as, iova, false, &page, &count);
        if (WARN_ON(!pte))
                return;

        if (WARN_ON(*pte == _PTE_VACANT(iova)))
                return;

        *pte = _PTE_VACANT(iova);
        FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
        flush_ptc_and_tlb(as->smmu, as, iova, pte, page, 0);
        if (!--(*count))
                free_ptbl(as, iova);
}

static void __smmu_iommu_map_pfn(struct smmu_as *as, dma_addr_t iova,
                                 unsigned long pfn)
{
        struct smmu_device *smmu = as->smmu;
        unsigned long *pte;
        unsigned int *count;
        struct page *page;

        pte = locate_pte(as, iova, true, &page, &count);
        if (WARN_ON(!pte))
                return;

        if (*pte == _PTE_VACANT(iova))
                (*count)++;
        *pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr);
        if (unlikely((*pte == _PTE_VACANT(iova))))
                (*count)--;
        FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
        flush_ptc_and_tlb(smmu, as, iova, pte, page, 0);
        put_signature(as, iova, pfn);
}

static int smmu_iommu_map(struct iommu_domain *domain, unsigned long iova,
                          phys_addr_t pa, size_t bytes, int prot)
{
        struct smmu_as *as = domain->priv;
        unsigned long pfn = __phys_to_pfn(pa);
        unsigned long flags;

        dev_dbg(as->smmu->dev, "[%d] %08lx:%pa\n", as->asid, iova, &pa);

        if (!pfn_valid(pfn))
                return -ENOMEM;

        spin_lock_irqsave(&as->lock, flags);
        __smmu_iommu_map_pfn(as, iova, pfn);
        spin_unlock_irqrestore(&as->lock, flags);
        return 0;
}

static size_t smmu_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                               size_t bytes)
{
        struct smmu_as *as = domain->priv;
        unsigned long flags;

        dev_dbg(as->smmu->dev, "[%d] %08lx\n", as->asid, iova);

        spin_lock_irqsave(&as->lock, flags);
        __smmu_iommu_unmap(as, iova);
        spin_unlock_irqrestore(&as->lock, flags);
        return SMMU_PAGE_SIZE;
}

static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain,
                                           unsigned long iova)
{
        struct smmu_as *as = domain->priv;
        unsigned long *pte;
        unsigned int *count;
        struct page *page;
        unsigned long pfn;
        unsigned long flags;

        spin_lock_irqsave(&as->lock, flags);

        pte = locate_pte(as, iova, true, &page, &count);
        if (WARN_ON(!pte)) {
                spin_unlock_irqrestore(&as->lock, flags);
                return 0;
        }
        pfn = *pte & SMMU_PFN_MASK;
        WARN_ON(!pfn_valid(pfn));
        dev_dbg(as->smmu->dev,
                "iova:%08lx pfn:%08lx asid:%d\n", iova, pfn, as->asid);

        spin_unlock_irqrestore(&as->lock, flags);
        return PFN_PHYS(pfn);
}

static int smmu_iommu_domain_has_cap(struct iommu_domain *domain,
                                     unsigned long cap)
{
        return 0;
}

static int smmu_iommu_attach_dev(struct iommu_domain *domain,
                                 struct device *dev)
{
        struct smmu_as *as = domain->priv;
        struct smmu_device *smmu = as->smmu;
        struct smmu_client *client, *c;
        u32 map;
        int err;

        client = devm_kzalloc(smmu->dev, sizeof(*client), GFP_KERNEL);
        if (!client)
                return -ENOMEM;
        client->dev = dev;
        client->as = as;
        map = (unsigned long)dev->platform_data;
        if (!map) {
                err = -EINVAL;
                goto err_hwgrp;
        }

        err = smmu_client_enable_hwgrp(client, map);
        if (err)
                goto err_hwgrp;

        spin_lock(&as->client_lock);
        list_for_each_entry(c, &as->client, list) {
                if (c->dev == dev) {
                        dev_err(smmu->dev,
                                "%s is already attached\n", dev_name(c->dev));
                        err = -EINVAL;
                        goto err_client;
                }
        }
        list_add(&client->list, &as->client);
        spin_unlock(&as->client_lock);

        /*
         * Reserve "page zero" for AVP vectors using a common dummy
         * page.
         */
        if (map & HWG_AVPC) {
                struct page *page;

                page = as->smmu->avp_vector_page;
                __smmu_iommu_map_pfn(as, 0, page_to_pfn(page));

                pr_info("Reserve \"page zero\" for AVP vectors using a common dummy\n");
        }

        dev_dbg(smmu->dev, "%s is attached\n", dev_name(dev));
        return 0;

err_client:
        smmu_client_disable_hwgrp(client);
        spin_unlock(&as->client_lock);
err_hwgrp:
        devm_kfree(smmu->dev, client);
        return err;
}

static void smmu_iommu_detach_dev(struct iommu_domain *domain,
                                  struct device *dev)
{
        struct smmu_as *as = domain->priv;
        struct smmu_device *smmu = as->smmu;
        struct smmu_client *c;

        spin_lock(&as->client_lock);

        list_for_each_entry(c, &as->client, list) {
                if (c->dev == dev) {
                        smmu_client_disable_hwgrp(c);
                        list_del(&c->list);
                        c->as = NULL;
                        dev_dbg(smmu->dev,
                                "%s is detached\n", dev_name(c->dev));
                        devm_kfree(smmu->dev, c);
                        goto out;
                }
        }
        dev_err(smmu->dev, "Couldn't find %s\n", dev_name(dev));
out:
        spin_unlock(&as->client_lock);
}

static int smmu_iommu_domain_init(struct iommu_domain *domain)
{
        int i, err = -EAGAIN;
        unsigned long flags;
        struct smmu_as *as;
        struct smmu_device *smmu = smmu_handle;

        /* Look for a free AS with lock held */
        for (i = 0; i < smmu->num_as; i++) {
                as = &smmu->as[i];

                if (as->pdir_page)
                        continue;

                err = alloc_pdir(as);
                if (!err)
                        goto found;

                if (err != -EAGAIN)
                        break;
        }
        if (i == smmu->num_as)
                dev_err(smmu->dev, "no free AS\n");
        return err;

found:
        spin_lock_irqsave(&smmu->lock, flags);

        /* Update PDIR register */
        smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
        smmu_write(smmu,
                   SMMU_MK_PDIR(as->pdir_page, as->pdir_attr), SMMU_PTB_DATA);
        FLUSH_SMMU_REGS(smmu);

        spin_unlock_irqrestore(&smmu->lock, flags);

        domain->priv = as;

        domain->geometry.aperture_start = smmu->iovmm_base;
        domain->geometry.aperture_end   = smmu->iovmm_base +
                smmu->page_count * SMMU_PAGE_SIZE - 1;
        domain->geometry.force_aperture = true;

        dev_dbg(smmu->dev, "smmu_as@%p\n", as);

        return 0;
}

static void smmu_iommu_domain_destroy(struct iommu_domain *domain)
{
        struct smmu_as *as = domain->priv;
        struct smmu_device *smmu = as->smmu;
        unsigned long flags;

        spin_lock_irqsave(&as->lock, flags);

        if (as->pdir_page) {
                spin_lock(&smmu->lock);
                smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
                smmu_write(smmu, SMMU_PTB_DATA_RESET_VAL, SMMU_PTB_DATA);
                FLUSH_SMMU_REGS(smmu);
                spin_unlock(&smmu->lock);

                free_pdir(as);
        }

        /* Detach clients one at a time; each detach frees its entry. */
        while (!list_empty(&as->client)) {
                struct smmu_client *c;

                c = list_first_entry(&as->client, struct smmu_client, list);
                smmu_iommu_detach_dev(domain, c->dev);
        }

        spin_unlock_irqrestore(&as->lock, flags);

        domain->priv = NULL;
        dev_dbg(smmu->dev, "smmu_as@%p\n", as);
}

static struct iommu_ops smmu_iommu_ops = {
        .domain_init    = smmu_iommu_domain_init,
        .domain_destroy = smmu_iommu_domain_destroy,
        .attach_dev     = smmu_iommu_attach_dev,
        .detach_dev     = smmu_iommu_detach_dev,
        .map            = smmu_iommu_map,
        .unmap          = smmu_iommu_unmap,
        .iova_to_phys   = smmu_iommu_iova_to_phys,
        .domain_has_cap = smmu_iommu_domain_has_cap,
        .pgsize_bitmap  = SMMU_IOMMU_PGSIZES,
};

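/*
 * Usage sketch (illustrative only, not part of this driver): once
 * bus_set_iommu() has been called in tegra_smmu_probe(), a client
 * driver goes through the generic IOMMU API of this kernel era:
 *
 *      struct iommu_domain *domain;
 *
 *      domain = iommu_domain_alloc(&platform_bus_type);
 *      if (domain && !iommu_attach_device(domain, dev))
 *              err = iommu_map(domain, iova, paddr, SZ_4K,
 *                              IOMMU_READ | IOMMU_WRITE);
 */
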
/* Should be in the order of enum */
static const char * const smmu_debugfs_mc[] = { "mc", };
static const char * const smmu_debugfs_cache[] = { "tlb", "ptc", };

static ssize_t smmu_debugfs_stats_write(struct file *file,
                                        const char __user *buffer,
                                        size_t count, loff_t *pos)
{
        struct smmu_debugfs_info *info;
        struct smmu_device *smmu;
        struct dentry *dent;
        int i;
        enum {
                _OFF = 0,
                _ON,
                _RESET,
        };
        const char * const command[] = {
                [_OFF]          = "off",
                [_ON]           = "on",
                [_RESET]        = "reset",
        };
        char str[] = "reset";
        u32 val;
        size_t offs;

        count = min_t(size_t, count, sizeof(str));
        if (copy_from_user(str, buffer, count))
                return -EFAULT;

        for (i = 0; i < ARRAY_SIZE(command); i++)
                if (strncmp(str, command[i],
                            strlen(command[i])) == 0)
                        break;

        if (i == ARRAY_SIZE(command))
                return -EINVAL;

        dent = file->f_dentry;
        info = dent->d_inode->i_private;
        smmu = info->smmu;

        offs = SMMU_CACHE_CONFIG(info->cache);
        val = smmu_read(smmu, offs);
        switch (i) {
        case _OFF:
                val &= ~SMMU_CACHE_CONFIG_STATS_ENABLE;
                val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
                smmu_write(smmu, val, offs);
                break;
        case _ON:
                val |= SMMU_CACHE_CONFIG_STATS_ENABLE;
                val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
                smmu_write(smmu, val, offs);
                break;
        case _RESET:
                val |= SMMU_CACHE_CONFIG_STATS_TEST;
                smmu_write(smmu, val, offs);
                val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
                smmu_write(smmu, val, offs);
                break;
        default:
                BUG();
                break;
        }

        dev_dbg(smmu->dev, "%s() %08x, %08x @%08zx\n", __func__,
                val, smmu_read(smmu, offs), offs);

        return count;
}

static int smmu_debugfs_stats_show(struct seq_file *s, void *v)
{
        struct smmu_debugfs_info *info;
        struct smmu_device *smmu;
        struct dentry *dent;
        int i;
        const char * const stats[] = { "hit", "miss", };

        dent = d_find_alias(s->private);
        info = dent->d_inode->i_private;
        smmu = info->smmu;

        for (i = 0; i < ARRAY_SIZE(stats); i++) {
                u32 val;
                size_t offs;

                offs = SMMU_STATS_CACHE_COUNT(info->mc, info->cache, i);
                val = smmu_read(smmu, offs);
                seq_printf(s, "%s:%08x ", stats[i], val);

                dev_dbg(smmu->dev, "%s() %s %08x @%08zx\n", __func__,
                        stats[i], val, offs);
        }
        seq_printf(s, "\n");
        dput(dent);

        return 0;
}

static int smmu_debugfs_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, smmu_debugfs_stats_show, inode);
}

static const struct file_operations smmu_debugfs_stats_fops = {
        .open           = smmu_debugfs_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
        .write          = smmu_debugfs_stats_write,
};

static void smmu_debugfs_delete(struct smmu_device *smmu)
{
        debugfs_remove_recursive(smmu->debugfs_root);
        kfree(smmu->debugfs_info);
}

static void smmu_debugfs_create(struct smmu_device *smmu)
{
        int i;
        size_t bytes;
        struct dentry *root;

        bytes = ARRAY_SIZE(smmu_debugfs_mc) * ARRAY_SIZE(smmu_debugfs_cache) *
                sizeof(*smmu->debugfs_info);
        smmu->debugfs_info = kmalloc(bytes, GFP_KERNEL);
        if (!smmu->debugfs_info)
                return;

        root = debugfs_create_dir(dev_name(smmu->dev), NULL);
        if (!root)
                goto err_out;
        smmu->debugfs_root = root;

        for (i = 0; i < ARRAY_SIZE(smmu_debugfs_mc); i++) {
                int j;
                struct dentry *mc;

                mc = debugfs_create_dir(smmu_debugfs_mc[i], root);
                if (!mc)
                        goto err_out;

                for (j = 0; j < ARRAY_SIZE(smmu_debugfs_cache); j++) {
                        struct dentry *cache;
                        struct smmu_debugfs_info *info;

                        info = smmu->debugfs_info;
                        info += i * ARRAY_SIZE(smmu_debugfs_cache) + j;
                        info->smmu = smmu;
                        info->mc = i;
                        info->cache = j;

                        cache = debugfs_create_file(smmu_debugfs_cache[j],
                                                    S_IWUGO | S_IRUGO, mc,
                                                    (void *)info,
                                                    &smmu_debugfs_stats_fops);
                        if (!cache)
                                goto err_out;
                }
        }

        return;

err_out:
        smmu_debugfs_delete(smmu);
}

static int tegra_smmu_suspend(struct device *dev)
{
        struct smmu_device *smmu = dev_get_drvdata(dev);

        smmu->translation_enable_0 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_0);
        smmu->translation_enable_1 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_1);
        smmu->translation_enable_2 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_2);
        smmu->asid_security = smmu_read(smmu, SMMU_ASID_SECURITY);
        return 0;
}

static int tegra_smmu_resume(struct device *dev)
{
        struct smmu_device *smmu = dev_get_drvdata(dev);
        unsigned long flags;
        int err;

        spin_lock_irqsave(&smmu->lock, flags);
        err = smmu_setup_regs(smmu);
        spin_unlock_irqrestore(&smmu->lock, flags);
        return err;
}

static int tegra_smmu_probe(struct platform_device *pdev)
{
        struct smmu_device *smmu;
        struct device *dev = &pdev->dev;
        int i, err = 0;
        u32 asids;
        dma_addr_t uninitialized_var(base);
        size_t bytes, uninitialized_var(size);

        if (smmu_handle)
                return -EIO;

        BUILD_BUG_ON(PAGE_SHIFT != SMMU_PAGE_SHIFT);

        if (of_property_read_u32(dev->of_node, "nvidia,#asids", &asids))
                return -ENODEV;

        bytes = sizeof(*smmu) + asids * sizeof(*smmu->as);
        smmu = devm_kzalloc(dev, bytes, GFP_KERNEL);
        if (!smmu) {
                dev_err(dev, "failed to allocate smmu_device\n");
                return -ENOMEM;
        }

        smmu->nregs = pdev->num_resources;
        smmu->regs = devm_kzalloc(dev, 2 * smmu->nregs * sizeof(*smmu->regs),
                                  GFP_KERNEL);
        smmu->rege = smmu->regs + smmu->nregs;
        if (!smmu->regs)
                return -ENOMEM;
        for (i = 0; i < smmu->nregs; i++) {
                struct resource *res;

                res = platform_get_resource(pdev, IORESOURCE_MEM, i);
                if (!res)
                        return -ENODEV;
                smmu->regs[i] = devm_request_and_ioremap(&pdev->dev, res);
                if (!smmu->regs[i])
                        return -EBUSY;
                smmu->rege[i] = smmu->regs[i] + resource_size(res) - 1;
        }
        /* Same as the start address of the 1st ("mc") register block */
        smmu->regbase = (void __iomem *)((u32)smmu->regs[0] & PAGE_MASK);

        err = of_get_dma_window(dev->of_node, NULL, 0, NULL, &base, &size);
        if (err)
                return -ENODEV;

        if (size & SMMU_PAGE_MASK)
                return -EINVAL;

        size >>= SMMU_PAGE_SHIFT;
        if (!size)
                return -EINVAL;

        smmu->ahb = of_parse_phandle(dev->of_node, "nvidia,ahb", 0);
        if (!smmu->ahb)
                return -ENODEV;

        smmu->dev = dev;
        smmu->num_as = asids;
        smmu->iovmm_base = base;
        smmu->page_count = size;

        smmu->translation_enable_0 = ~0;
        smmu->translation_enable_1 = ~0;
        smmu->translation_enable_2 = ~0;
        smmu->asid_security = 0;

        for (i = 0; i < smmu->num_as; i++) {
                struct smmu_as *as = &smmu->as[i];

                as->smmu = smmu;
                as->asid = i;
                as->pdir_attr = _PDIR_ATTR;
                as->pde_attr = _PDE_ATTR;
                as->pte_attr = _PTE_ATTR;

                spin_lock_init(&as->lock);
                spin_lock_init(&as->client_lock);
                INIT_LIST_HEAD(&as->client);
        }
        spin_lock_init(&smmu->lock);
        err = smmu_setup_regs(smmu);
        if (err)
                return err;
        platform_set_drvdata(pdev, smmu);

        smmu->avp_vector_page = alloc_page(GFP_KERNEL);
        if (!smmu->avp_vector_page)
                return -ENOMEM;

        smmu_debugfs_create(smmu);
        smmu_handle = smmu;
        bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
        return 0;
}

static int tegra_smmu_remove(struct platform_device *pdev)
{
        struct smmu_device *smmu = platform_get_drvdata(pdev);
        int i;

        smmu_debugfs_delete(smmu);

        smmu_write(smmu, SMMU_CONFIG_DISABLE, SMMU_CONFIG);
        for (i = 0; i < smmu->num_as; i++)
                free_pdir(&smmu->as[i]);
        __free_page(smmu->avp_vector_page);
        smmu_handle = NULL;
        return 0;
}

static const struct dev_pm_ops tegra_smmu_pm_ops = {
        .suspend        = tegra_smmu_suspend,
        .resume         = tegra_smmu_resume,
};

#ifdef CONFIG_OF
static const struct of_device_id tegra_smmu_of_match[] = {
        { .compatible = "nvidia,tegra30-smmu", },
        { },
};
MODULE_DEVICE_TABLE(of, tegra_smmu_of_match);
#endif

static struct platform_driver tegra_smmu_driver = {
        .probe          = tegra_smmu_probe,
        .remove         = tegra_smmu_remove,
        .driver = {
                .owner  = THIS_MODULE,
                .name   = "tegra-smmu",
                .pm     = &tegra_smmu_pm_ops,
                .of_match_table = of_match_ptr(tegra_smmu_of_match),
        },
};

static int tegra_smmu_init(void)
{
        return platform_driver_register(&tegra_smmu_driver);
}

static void __exit tegra_smmu_exit(void)
{
        platform_driver_unregister(&tegra_smmu_driver);
}

subsys_initcall(tegra_smmu_init);
module_exit(tegra_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for SMMU in Tegra30");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
MODULE_ALIAS("platform:tegra-smmu");
MODULE_LICENSE("GPL v2");