/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- 4k and 64k pages, with contiguous pte hints.
 *	- Up to 42-bit addressing (dependent on VA_BITS)
 *	- Context fault reporting
 */
#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include <asm/pgalloc.h>
/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		MAX_PHANDLE_ARGS

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (smmu)->pagesize)
/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))
/* Page table bits */
#define ARM_SMMU_PTE_XN			(((pteval_t)3) << 53)
#define ARM_SMMU_PTE_CONT		(((pteval_t)1) << 52)
#define ARM_SMMU_PTE_AF			(((pteval_t)1) << 10)
#define ARM_SMMU_PTE_SH_NS		(((pteval_t)0) << 8)
#define ARM_SMMU_PTE_SH_OS		(((pteval_t)2) << 8)
#define ARM_SMMU_PTE_SH_IS		(((pteval_t)3) << 8)
#define ARM_SMMU_PTE_PAGE		(((pteval_t)3) << 0)
#if PAGE_SIZE == SZ_4K
#define ARM_SMMU_PTE_CONT_ENTRIES	16
#elif PAGE_SIZE == SZ_64K
#define ARM_SMMU_PTE_CONT_ENTRIES	32
#else
#define ARM_SMMU_PTE_CONT_ENTRIES	1
#endif

#define ARM_SMMU_PTE_CONT_SIZE		(PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES)
#define ARM_SMMU_PTE_CONT_MASK		(~(ARM_SMMU_PTE_CONT_SIZE - 1))
/* Stage 1 */
#define ARM_SMMU_PTE_AP_UNPRIV		(((pteval_t)1) << 6)
#define ARM_SMMU_PTE_AP_RDONLY		(((pteval_t)2) << 6)
#define ARM_SMMU_PTE_ATTRINDX_SHIFT	2
#define ARM_SMMU_PTE_nG			(((pteval_t)1) << 11)
/* Stage 2 */
#define ARM_SMMU_PTE_HAP_FAULT		(((pteval_t)0) << 6)
#define ARM_SMMU_PTE_HAP_READ		(((pteval_t)1) << 6)
#define ARM_SMMU_PTE_HAP_WRITE		(((pteval_t)2) << 6)
#define ARM_SMMU_PTE_MEMATTR_OIWB	(((pteval_t)0xf) << 2)
#define ARM_SMMU_PTE_MEMATTR_NC		(((pteval_t)0x5) << 2)
#define ARM_SMMU_PTE_MEMATTR_DEV	(((pteval_t)0x1) << 2)
/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3
/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58
#define ARM_SMMU_GR0_PIDR0		0xfe0
#define ARM_SMMU_GR0_PIDR1		0xfe4
#define ARM_SMMU_GR0_PIDR2		0xfe8
#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_PTFS_SHIFT			24
#define ID0_PTFS_MASK			0x2
#define ID0_PTFS_V8_ONLY		0x2
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff
#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)

#define PIDR2_ARCH_SHIFT		4
#define PIDR2_ARCH_MASK			0xf
/* Global TLB invalidation */
#define ARM_SMMU_GR0_STLBIALL		0x60
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)
/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (smmu)->pagesize)

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0_LO		0x20
#define ARM_SMMU_CB_TTBR0_HI		0x24
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR_LO		0x60
#define ARM_SMMU_CB_FAR_HI		0x64
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)
#define TTBCR_EAE			(1 << 31)

#define TTBCR_PASIZE_SHIFT		16
#define TTBCR_PASIZE_MASK		0x7

#define TTBCR_TG0_4K			(0 << 14)
#define TTBCR_TG0_64K			(1 << 14)

#define TTBCR_SH0_SHIFT			12
#define TTBCR_SH0_MASK			0x3
#define TTBCR_SH_NS			0
#define TTBCR_SH_OS			2
#define TTBCR_SH_IS			3

#define TTBCR_ORGN0_SHIFT		10
#define TTBCR_IRGN0_SHIFT		8
#define TTBCR_RGN_MASK			0x3
#define TTBCR_RGN_NC			0
#define TTBCR_RGN_WBWA			1
#define TTBCR_RGN_WT			2
#define TTBCR_RGN_WB			3

#define TTBCR_SL0_SHIFT			6
#define TTBCR_SL0_MASK			0x3
#define TTBCR_SL0_LVL_2			0
#define TTBCR_SL0_LVL_1			1

#define TTBCR_T1SZ_SHIFT		16
#define TTBCR_T0SZ_SHIFT		0
#define TTBCR_SZ_MASK			0xf

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_MASK			0x7

#define TTBCR2_PASIZE_SHIFT		0
#define TTBCR2_PASIZE_MASK		0x7

/* Common definitions for PASize and SEP fields */
#define TTBCR2_ADDR_32			0
#define TTBCR2_ADDR_36			1
#define TTBCR2_ADDR_40			2
#define TTBCR2_ADDR_42			3
#define TTBCR2_ADDR_44			4
#define TTBCR2_ADDR_48			5
#define TTBRn_HI_ASID_SHIFT		16

#define MAIR_ATTR_SHIFT(n)		((n) << 3)
#define MAIR_ATTR_MASK			0xff
#define MAIR_ATTR_DEVICE		0x04
#define MAIR_ATTR_NC			0x44
#define MAIR_ATTR_WBRWA			0xff
#define MAIR_ATTR_IDX_NC		0
#define MAIR_ATTR_IDX_CACHE		1
#define MAIR_ATTR_IDX_DEV		2
#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | FSR_TLBMCF |	\
					 FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT |		\
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)
struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];

	/*
	 * We only need to allocate these on the root SMMU, as we
	 * configure unmatched streams to bypass translation.
	 */
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_master {
	struct device_node		*of_node;

	/*
	 * The following is specific to the master's position in the
	 * SMMU chain.
	 */
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

struct arm_smmu_device {
	struct device			*dev;
	struct device_node		*parent_of_node;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pagesize;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	int				version;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			input_size;
	unsigned long			s1_output_size;
	unsigned long			s2_output_size;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;
};

struct arm_smmu_cfg {
	struct arm_smmu_device		*smmu;
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	pgd_t				*pgd;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(cfg)		((cfg)->cbndx)
#define ARM_SMMU_CB_VMID(cfg)		((cfg)->cbndx + 1)

struct arm_smmu_domain {
	/*
	 * A domain can span across multiple, chained SMMUs and requires
	 * all devices within the domain to follow the same translation
	 * path.
	 */
	struct arm_smmu_device		*leaf_smmu;
	struct arm_smmu_cfg		root_cfg;
	phys_addr_t			output_mask;

	spinlock_t			lock;
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

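/*
 * Implementation-specific quirks are opted into via DT properties; the
 * table above maps each property string onto an ARM_SMMU_OPT_* flag.
 */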
static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

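/*
 * For PCI devices the SMMU sits upstream of the host controller, so walk
 * up to the root bus and treat the bridge's parent (the host controller
 * itself) as the master device.
 */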
static struct device *dev_get_master_dev(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;
		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent;
	}

	return dev;
}

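/*
 * Masters are kept in an rb-tree keyed on the device_node pointer itself;
 * plain pointer comparison gives a stable ordering because lookups only
 * ever use nodes that were previously inserted.
 */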
static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;
		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct arm_smmu_device *smmu, struct device *dev)
{
	struct arm_smmu_master *master;

	if (dev_is_pci(dev))
		return dev->archdata.iommu;

	master = find_smmu_master(smmu, dev->of_node);
	return master ? &master->cfg : NULL;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this;
		this = container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

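/*
 * Record one master from the "mmu-masters" DT property, rejecting
 * duplicate registrations and masters with more stream IDs than fit in
 * the fixed-size streamids[] array.
 */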
static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct of_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i)
		master->cfg.streamids[i] = masterspec->args[i];

	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_parent_smmu(struct arm_smmu_device *smmu)
{
	struct arm_smmu_device *parent;

	if (!smmu->parent_of_node)
		return NULL;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(parent, &arm_smmu_devices, list)
		if (parent->dev->of_node == smmu->parent_of_node)
			goto out_unlock;

	parent = NULL;
	dev_warn(smmu->dev,
		 "Failed to find SMMU parent despite parent in DT\n");
out_unlock:
	spin_unlock(&arm_smmu_devices_lock);
	return parent;
}

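/*
 * Find the SMMU at the top of the chain for this master: skip any SMMU
 * that has a child SMMU sitting above the master, then check that the
 * remaining candidate actually carries a stream mapping for it.
 */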
static struct arm_smmu_device *find_parent_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *child, *parent, *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_master_dev(dev)->of_node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(parent, &arm_smmu_devices, list) {
		smmu = parent;

		/* Try to find a child of the current SMMU. */
		list_for_each_entry(child, &arm_smmu_devices, list) {
			if (child->parent_of_node == parent->dev->of_node) {
				/* Does the child sit above our master? */
				master = find_smmu_master(child, dev_node);
				if (master) {
					smmu = NULL;
					break;
				}
			}
		}

		/* We found some children, so keep searching. */
		if (!smmu) {
			master = NULL;
			continue;
		}

		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);
	return master ? smmu : NULL;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_inv_context(struct arm_smmu_cfg *cfg)
{
	struct arm_smmu_device *smmu = cfg->smmu;
	void __iomem *base = ARM_SMMU_GR0(smmu);
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	arm_smmu_tlb_sync(smmu);
}

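/*
 * Context fault handler. Read the FSR/FAR to build a fault report; if a
 * higher layer handles the fault we retry the stalled transaction,
 * otherwise we terminate it.
 */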
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret;
	u32 fsr, far, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	struct arm_smmu_device *smmu = root_cfg->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fsr & FSR_IGN)
		dev_err_ratelimited(smmu->dev,
				    "Unexpected context fault (fsr 0x%x)\n",
				    fsr);

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
	iova = far;
#ifdef CONFIG_64BIT
	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
	iova |= ((unsigned long)far << 32);
#endif

	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
		ret = IRQ_HANDLED;
		resume = RESUME_RETRY;
	} else {
		dev_err_ratelimited(smmu->dev,
		    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
		    iova, fsynr, root_cfg->cbndx);
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/* Clear the faulting FSR */
	writel(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* Retry or terminate any stalled transactions */
	if (fsr & FSR_SS)
		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);

	return ret;
}

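/*
 * Global faults (e.g. unmatched streams, configuration errors) are only
 * logged and acknowledged; there is nothing we can usefully do to recover.
 */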
static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
				   size_t size)
{
	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;

	/* Ensure new page tables are visible to the hardware walker */
	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
		dsb(ishst);
	} else {
		/*
		 * If the SMMU can't walk tables in the CPU caches, treat them
		 * like non-coherent DMA since we need to flush the new entries
		 * all the way out to memory. There's no possibility of
		 * recursion here as the SMMU table walker will not be wired
		 * through another SMMU.
		 */
		dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
			     DMA_TO_DEVICE);
	}
}

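/*
 * Program one context bank: CBAR/CBA2R via GR1, then TTBCR2, TTBR0, TTBCR,
 * MAIR0 and finally SCTLR in the context bank itself. Writing SCTLR.M last
 * ensures translation only begins once the table pointers and attributes
 * are in place.
 */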
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	struct arm_smmu_device *smmu = root_cfg->smmu;
	void __iomem *cb_base, *gr0_base, *gr1_base;

	gr0_base = ARM_SMMU_GR0(smmu);
	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = root_cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);

	/* CBAR */
	reg = root_cfg->cbar;
	if (smmu->version == 1)
		reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else {
		reg |= ARM_SMMU_CB_VMID(root_cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx));

	if (smmu->version > 1) {
		/* CBA2R */
#ifdef CONFIG_64BIT
		reg = CBA2R_RW64_64BIT;
#else
		reg = CBA2R_RW64_32BIT;
#endif
		writel_relaxed(reg,
			       gr1_base + ARM_SMMU_GR1_CBA2R(root_cfg->cbndx));

		/* TTBCR2 */
		switch (smmu->input_size) {
		case 32:
			reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
			break;
		case 36:
			reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
			break;
		case 40:
			reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
			break;
		case 42:
			reg = (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
			break;
		case 44:
			reg = (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
			break;
		case 48:
			reg = (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
			break;
		}

		switch (smmu->s1_output_size) {
		case 32:
			reg |= (TTBCR2_ADDR_32 << TTBCR2_PASIZE_SHIFT);
			break;
		case 36:
			reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT);
			break;
		case 40:
			reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT);
			break;
		case 42:
			reg |= (TTBCR2_ADDR_42 << TTBCR2_PASIZE_SHIFT);
			break;
		case 44:
			reg |= (TTBCR2_ADDR_44 << TTBCR2_PASIZE_SHIFT);
			break;
		case 48:
			reg |= (TTBCR2_ADDR_48 << TTBCR2_PASIZE_SHIFT);
			break;
		}

		if (stage1)
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
	}

	/* TTBR0 */
	arm_smmu_flush_pgtable(smmu, root_cfg->pgd,
			       PTRS_PER_PGD * sizeof(pgd_t));
	reg = __pa(root_cfg->pgd);
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
	reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
	if (stage1)
		reg |= ARM_SMMU_CB_ASID(root_cfg) << TTBRn_HI_ASID_SHIFT;
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);

	/*
	 * TTBCR
	 * We use long descriptor, with inner-shareable WBWA tables in TTBR0.
	 */
	if (smmu->version > 1) {
		if (PAGE_SIZE == SZ_4K)
			reg = TTBCR_TG0_4K;
		else
			reg = TTBCR_TG0_64K;

		if (!stage1) {
			reg |= (64 - smmu->s1_output_size) << TTBCR_T0SZ_SHIFT;

			switch (smmu->s2_output_size) {
			case 32:
				reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT);
				break;
			case 36:
				reg |= (TTBCR2_ADDR_36 << TTBCR_PASIZE_SHIFT);
				break;
			case 40:
				reg |= (TTBCR2_ADDR_40 << TTBCR_PASIZE_SHIFT);
				break;
			case 42:
				reg |= (TTBCR2_ADDR_42 << TTBCR_PASIZE_SHIFT);
				break;
			case 44:
				reg |= (TTBCR2_ADDR_44 << TTBCR_PASIZE_SHIFT);
				break;
			case 48:
				reg |= (TTBCR2_ADDR_48 << TTBCR_PASIZE_SHIFT);
				break;
			}
		} else {
			reg |= (64 - smmu->input_size) << TTBCR_T0SZ_SHIFT;
		}
	} else {
		reg = 0;
	}

	reg |= TTBCR_EAE |
	      (TTBCR_SH_IS << TTBCR_SH0_SHIFT) |
	      (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) |
	      (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT) |
	      (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);

	/* MAIR0 (stage-1 only) */
	if (stage1) {
		reg = (MAIR_ATTR_NC << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_NC)) |
		      (MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) |
		      (MAIR_ATTR_DEVICE << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_DEV));
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct device *dev,
					struct arm_smmu_device *device_smmu)
{
	int irq, ret, start;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	struct arm_smmu_device *smmu, *parent;

	/*
	 * Walk the SMMU chain to find the root device for this chain.
	 * We assume that no masters have translations which terminate
	 * early, and therefore check that the root SMMU does indeed have
	 * a StreamID for the master in question.
	 */
	parent = device_smmu;
	smmu_domain->output_mask = -1;
	do {
		smmu = parent;
		smmu_domain->output_mask &= (1ULL << smmu->s2_output_size) - 1;
	} while ((parent = find_parent_smmu(smmu)));

	if (!find_smmu_master_cfg(smmu, dev)) {
		dev_err(dev, "unable to find root SMMU config for device\n");
		return -ENODEV;
	}

	if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
		root_cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
	} else if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) {
		root_cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
	} else {
		root_cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (IS_ERR_VALUE(ret))
		return ret;

	root_cfg->cbndx = ret;
	if (smmu->version == 1) {
		root_cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		root_cfg->irptndx %= smmu->num_context_irqs;
	} else {
		root_cfg->irptndx = root_cfg->cbndx;
	}

	irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
	ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
			  "arm-smmu-context-fault", domain);
	if (IS_ERR_VALUE(ret)) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			root_cfg->irptndx, irq);
		root_cfg->irptndx = INVALID_IRPTNDX;
		goto out_free_context;
	}

	root_cfg->smmu = smmu;
	arm_smmu_init_context_bank(smmu_domain);
	smmu_domain->leaf_smmu = device_smmu;
	return 0;

out_free_context:
	__arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	struct arm_smmu_device *smmu = root_cfg->smmu;
	void __iomem *cb_base;
	int irq;

	if (!smmu)
		return;

	/* Disable the context bank and nuke the TLB before freeing it. */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
	arm_smmu_tlb_inv_context(root_cfg);

	if (root_cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
		free_irq(irq, domain);
	}

	__arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx);
}

static int arm_smmu_domain_init(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain;
	pgd_t *pgd;

	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return -ENOMEM;

	pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
	if (!pgd)
		goto out_free_domain;
	smmu_domain->root_cfg.pgd = pgd;

	spin_lock_init(&smmu_domain->lock);
	domain->priv = smmu_domain;
	return 0;

out_free_domain:
	kfree(smmu_domain);
	return -ENOMEM;
}

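/*
 * Teardown helpers for the software page tables. Each level walks its
 * entries and frees the level below; see arm_smmu_free_pgtables() for why
 * no TLB maintenance is required here.
 */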
static void arm_smmu_free_ptes(pmd_t *pmd)
{
	pgtable_t table = pmd_pgtable(*pmd);
	pgtable_page_dtor(table);
	__free_page(table);
}

static void arm_smmu_free_pmds(pud_t *pud)
{
	int i;
	pmd_t *pmd, *pmd_base = pmd_offset(pud, 0);

	/* Advance the entry pointer every iteration, even for empty slots */
	for (i = 0, pmd = pmd_base; i < PTRS_PER_PMD; ++i, ++pmd) {
		if (pmd_none(*pmd))
			continue;

		arm_smmu_free_ptes(pmd);
	}

	pmd_free(NULL, pmd_base);
}

static void arm_smmu_free_puds(pgd_t *pgd)
{
	int i;
	pud_t *pud, *pud_base = pud_offset(pgd, 0);

	for (i = 0, pud = pud_base; i < PTRS_PER_PUD; ++i, ++pud) {
		if (pud_none(*pud))
			continue;

		arm_smmu_free_pmds(pud);
	}

	pud_free(NULL, pud_base);
}

static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain)
{
	int i;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	pgd_t *pgd, *pgd_base = root_cfg->pgd;

	/*
	 * Recursively free the page tables for this domain. We don't
	 * care about speculative TLB filling because the tables should
	 * not be active in any context bank at this point (SCTLR.M is 0).
	 */
	for (i = 0, pgd = pgd_base; i < PTRS_PER_PGD; ++i, ++pgd) {
		if (pgd_none(*pgd))
			continue;

		arm_smmu_free_puds(pgd);
	}

	kfree(pgd_base);
}

static void arm_smmu_domain_destroy(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	arm_smmu_destroy_domain_context(domain);
	arm_smmu_free_pgtables(smmu_domain);
	kfree(smmu_domain);
}

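/*
 * Allocate stream-match registers for a master in two phases: reserve all
 * the SMR indices first, and only touch the hardware once every allocation
 * has succeeded, so a failure leaves the SMMU state untouched.
 */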
static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc(sizeof(*smrs) * cfg->num_streamids, GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the root SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (IS_ERR_VALUE(idx)) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}

static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	/* Nothing to do on stream-indexing SMMUs, where no SMRs exist */
	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;
		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}

static void arm_smmu_bypass_stream_mapping(struct arm_smmu_device *smmu,
					   struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	for (i = 0; i < cfg->num_streamids; ++i) {
		u16 sid = cfg->streamids[i];
		writel_relaxed(S2CR_TYPE_BYPASS,
			       gr0_base + ARM_SMMU_GR0_S2CR(sid));
	}
}

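/*
 * Point the master's streams at the domain's context bank on the root
 * SMMU, installing bypass S2CR entries on any intermediate stream-indexing
 * SMMUs in the chain.
 */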
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret;
	struct arm_smmu_device *parent, *smmu = smmu_domain->root_cfg.smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	ret = arm_smmu_master_configure_smrs(smmu, cfg);
	if (ret)
		return ret;

	/* Bypass the leaves */
	smmu = smmu_domain->leaf_smmu;
	while ((parent = find_parent_smmu(smmu))) {
		/*
		 * We won't have a StreamID match for anything but the root
		 * smmu, so we only need to worry about StreamID indexing,
		 * where we must install bypass entries in the S2CRs.
		 */
		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
			arm_smmu_bypass_stream_mapping(smmu, cfg);

		/* Always advance, or we never reach the root SMMU */
		smmu = parent;
	}

	/* Now we're at the root, time to point at our context bank */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx, s2cr;
		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		s2cr = S2CR_TYPE_TRANS |
		       (smmu_domain->root_cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}

static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	struct arm_smmu_device *smmu = smmu_domain->root_cfg.smmu;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	arm_smmu_bypass_stream_mapping(smmu, cfg);
	arm_smmu_master_free_smrs(smmu, cfg);
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = -EINVAL;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_device *device_smmu;
	struct arm_smmu_master_cfg *cfg;
	unsigned long flags;

	device_smmu = dev_get_master_dev(dev)->archdata.iommu;
	if (!device_smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/*
	 * Sanity check the domain. We don't currently support domains
	 * that cross between different SMMU chains.
	 */
	spin_lock_irqsave(&smmu_domain->lock, flags);
	if (!smmu_domain->leaf_smmu) {
		/* Now that we have a master, we can finalise the domain */
		ret = arm_smmu_init_domain_context(domain, dev, device_smmu);
		if (IS_ERR_VALUE(ret))
			goto err_unlock;
	} else if (smmu_domain->leaf_smmu != device_smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->leaf_smmu->dev),
			dev_name(device_smmu->dev));
		goto err_unlock;
	}
	spin_unlock_irqrestore(&smmu_domain->lock, flags);

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(smmu_domain->leaf_smmu, dev);
	if (!cfg)
		return -ENODEV;

	return arm_smmu_domain_add_master(smmu_domain, cfg);

err_unlock:
	spin_unlock_irqrestore(&smmu_domain->lock, flags);
	return ret;
}

static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_master_cfg *cfg;

	cfg = find_smmu_master_cfg(smmu_domain->leaf_smmu, dev);
	if (cfg)
		arm_smmu_domain_remove_master(smmu_domain, cfg);
}

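/*
 * True if [addr, end) starts on an ARM_SMMU_PTE_CONT_SIZE boundary and
 * covers at least one full contiguous-hint region.
 */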
static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
					     unsigned long end)
{
	return !(addr & ~ARM_SMMU_PTE_CONT_MASK) &&
		(addr + ARM_SMMU_PTE_CONT_SIZE <= end);
}

static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
				   unsigned long addr, unsigned long end,
				   unsigned long pfn, int prot, int stage)
{
	pte_t *pte, *start;
	pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN;

	if (pmd_none(*pmd)) {
		/* Allocate a new set of tables */
		pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO);
		if (!table)
			return -ENOMEM;

		arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE);
		if (!pgtable_page_ctor(table)) {
			__free_page(table);
			return -ENOMEM;
		}
		pmd_populate(NULL, pmd, table);
		arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd));
	}

	if (stage == 1) {
		pteval |= ARM_SMMU_PTE_AP_UNPRIV | ARM_SMMU_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pteval |= ARM_SMMU_PTE_AP_RDONLY;

		if (prot & IOMMU_CACHE)
			pteval |= (MAIR_ATTR_IDX_CACHE <<
				   ARM_SMMU_PTE_ATTRINDX_SHIFT);
	} else {
		pteval |= ARM_SMMU_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pteval |= ARM_SMMU_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pteval |= ARM_SMMU_PTE_HAP_WRITE;
		if (prot & IOMMU_CACHE)
			pteval |= ARM_SMMU_PTE_MEMATTR_OIWB;
		else
			pteval |= ARM_SMMU_PTE_MEMATTR_NC;
	}

	/* If no access, create a faulting entry to avoid TLB fills */
	if (prot & IOMMU_EXEC)
		pteval &= ~ARM_SMMU_PTE_XN;
	else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
		pteval &= ~ARM_SMMU_PTE_PAGE;

	pteval |= ARM_SMMU_PTE_SH_IS;
	start = pmd_page_vaddr(*pmd) + pte_index(addr);
	pte = start;

	/*
	 * Install the page table entries. This is fairly complicated
	 * since we attempt to make use of the contiguous hint in the
	 * ptes where possible. The contiguous hint indicates a series
	 * of ARM_SMMU_PTE_CONT_ENTRIES ptes mapping a physically
	 * contiguous region with the following constraints:
	 *
	 *   - The region start is aligned to ARM_SMMU_PTE_CONT_SIZE
	 *   - Each pte in the region has the contiguous hint bit set
	 *
	 * This complicates unmapping (also handled by this code, when
	 * neither IOMMU_READ or IOMMU_WRITE are set) because it is
	 * possible, yet highly unlikely, that a client may unmap only
	 * part of a contiguous range. This requires clearing of the
	 * contiguous hint bits in the range before installing the new
	 * faulting entries.
	 *
	 * Note that re-mapping an address range without first unmapping
	 * it is not supported, so TLB invalidation is not required here
	 * and is instead performed at unmap and domain-init time.
	 */
	do {
		int i = 1;
		pteval &= ~ARM_SMMU_PTE_CONT;

		if (arm_smmu_pte_is_contiguous_range(addr, end)) {
			i = ARM_SMMU_PTE_CONT_ENTRIES;
			pteval |= ARM_SMMU_PTE_CONT;
		} else if (pte_val(*pte) &
			   (ARM_SMMU_PTE_CONT | ARM_SMMU_PTE_PAGE)) {
			int j;
			pte_t *cont_start;
			unsigned long idx = pte_index(addr);

			idx &= ~(ARM_SMMU_PTE_CONT_ENTRIES - 1);
			cont_start = pmd_page_vaddr(*pmd) + idx;
			for (j = 0; j < ARM_SMMU_PTE_CONT_ENTRIES; ++j)
				pte_val(*(cont_start + j)) &= ~ARM_SMMU_PTE_CONT;

			arm_smmu_flush_pgtable(smmu, cont_start,
					       sizeof(*pte) *
					       ARM_SMMU_PTE_CONT_ENTRIES);
		}

		do {
			*pte = pfn_pte(pfn, __pgprot(pteval));
		} while (pte++, pfn++, addr += PAGE_SIZE, --i);
	} while (addr != end);

	arm_smmu_flush_pgtable(smmu, start, sizeof(*pte) * (pte - start));
	return 0;
}

static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   phys_addr_t phys, int prot, int stage)
{
	int ret;
	pmd_t *pmd;
	unsigned long next, pfn = __phys_to_pfn(phys);

#ifndef __PAGETABLE_PMD_FOLDED
	if (pud_none(*pud)) {
		pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
		if (!pmd)
			return -ENOMEM;

		arm_smmu_flush_pgtable(smmu, pmd, PAGE_SIZE);
		pud_populate(NULL, pud, pmd);
		arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));

		pmd += pmd_index(addr);
	} else
#endif
		pmd = pmd_offset(pud, addr);

	do {
		next = pmd_addr_end(addr, end);
		ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, next, pfn,
					      prot, stage);
		phys += next - addr;
		/* Recompute the frame so ranges spanning a pmd stay correct */
		pfn = __phys_to_pfn(phys);
	} while (pmd++, addr = next, addr < end);

	return ret;
}

static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   phys_addr_t phys, int prot, int stage)
{
	int ret = 0;
	pud_t *pud;
	unsigned long next;

#ifndef __PAGETABLE_PUD_FOLDED
	if (pgd_none(*pgd)) {
		pud = (pud_t *)get_zeroed_page(GFP_ATOMIC);
		if (!pud)
			return -ENOMEM;

		arm_smmu_flush_pgtable(smmu, pud, PAGE_SIZE);
		pgd_populate(NULL, pgd, pud);
		arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));

		pud += pud_index(addr);
	} else
#endif
		pud = pud_offset(pgd, addr);

	do {
		next = pud_addr_end(addr, end);
		ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
					      prot, stage);
		phys += next - addr;
	} while (pud++, addr = next, addr < end);

	return ret;
}

static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
				   unsigned long iova, phys_addr_t paddr,
				   size_t size, int prot)
{
	int ret, stage;
	unsigned long end;
	phys_addr_t input_mask, output_mask;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	pgd_t *pgd = root_cfg->pgd;
	struct arm_smmu_device *smmu = root_cfg->smmu;
	unsigned long flags;

	if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) {
		stage = 2;
		output_mask = (1ULL << smmu->s2_output_size) - 1;
	} else {
		stage = 1;
		output_mask = (1ULL << smmu->s1_output_size) - 1;
	}

	if (!pgd)
		return -EINVAL;

	if (size & ~PAGE_MASK)
		return -EINVAL;

	input_mask = (1ULL << smmu->input_size) - 1;
	if ((phys_addr_t)iova & ~input_mask)
		return -ERANGE;

	if (paddr & ~output_mask)
		return -ERANGE;

	spin_lock_irqsave(&smmu_domain->lock, flags);
	pgd += pgd_index(iova);
	end = iova + size;
	do {
		unsigned long next = pgd_addr_end(iova, end);

		ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr,
					      prot, stage);
		if (ret)
			goto out_unlock;

		paddr += next - iova;
		iova = next;
	} while (pgd++, iova != end);

out_unlock:
	spin_unlock_irqrestore(&smmu_domain->lock, flags);

	return ret;
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;

	if (!smmu_domain)
		return -ENODEV;

	/* Check for silent address truncation up the SMMU chain. */
	if ((phys_addr_t)iova & ~smmu_domain->output_mask)
		return -ERANGE;

	return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, prot);
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = domain->priv;

	ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0);
	arm_smmu_tlb_inv_context(&smmu_domain->root_cfg);
	return ret ? 0 : size;
}

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	pgd_t *pgdp, pgd;
	pud_t pud;
	pmd_t pmd;
	pte_t pte;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;

	pgdp = root_cfg->pgd;
	if (!pgdp)
		return 0;

	pgd = *(pgdp + pgd_index(iova));
	if (pgd_none(pgd))
		return 0;

	pud = *pud_offset(&pgd, iova);
	if (pud_none(pud))
		return 0;

	pmd = *pmd_offset(&pud, iova);
	if (pmd_none(pmd))
		return 0;

	pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
	if (pte_none(pte))
		return 0;

	return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
}

static int arm_smmu_domain_has_cap(struct iommu_domain *domain,
				   unsigned long cap)
{
	unsigned long caps = 0;
	struct arm_smmu_domain *smmu_domain = domain->priv;

	if (smmu_domain->root_cfg.smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		caps |= IOMMU_CAP_CACHE_COHERENCY;

	return !!(cap & caps);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}

static int arm_smmu_add_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct iommu_group *group;
	int ret;

	if (dev->archdata.iommu) {
		dev_warn(dev, "IOMMU driver already assigned to device\n");
		return -EINVAL;
	}

	smmu = find_parent_smmu_for_device(dev);
	if (!smmu)
		return -ENODEV;

	group = iommu_group_alloc();
	if (IS_ERR(group)) {
		dev_err(dev, "Failed to allocate IOMMU group\n");
		return PTR_ERR(group);
	}

	if (dev_is_pci(dev)) {
		struct arm_smmu_master_cfg *cfg;
		struct pci_dev *pdev = to_pci_dev(dev);

		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg) {
			ret = -ENOMEM;
			goto out_put_group;
		}

		cfg->num_streamids = 1;
		/*
		 * Assume Stream ID == Requester ID for now.
		 * We need a way to describe the ID mappings in FDT.
		 */
		pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid,
				       &cfg->streamids[0]);
		dev->archdata.iommu = cfg;
	} else {
		dev->archdata.iommu = smmu;
	}

	ret = iommu_group_add_device(group, dev);

out_put_group:
	iommu_group_put(group);
	return ret;
}

static void arm_smmu_remove_device(struct device *dev)
{
	if (dev_is_pci(dev))
		kfree(dev->archdata.iommu);

	dev->archdata.iommu = NULL;
	iommu_group_remove_device(dev);
}

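/*
 * Mappings can be created at three granularities: sections, contiguous
 * pte runs and single pages, hence the three sizes in pgsize_bitmap.
 */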
static struct iommu_ops arm_smmu_ops = {
	.domain_init	= arm_smmu_domain_init,
	.domain_destroy	= arm_smmu_domain_destroy,
	.attach_dev	= arm_smmu_attach_dev,
	.detach_dev	= arm_smmu_detach_dev,
	.map		= arm_smmu_map,
	.unmap		= arm_smmu_unmap,
	.iova_to_phys	= arm_smmu_iova_to_phys,
	.domain_has_cap	= arm_smmu_domain_has_cap,
	.add_device	= arm_smmu_add_device,
	.remove_device	= arm_smmu_remove_device,
	.pgsize_bitmap	= (SECTION_SIZE |
			   ARM_SMMU_PTE_CONT_SIZE |
			   PAGE_SIZE),
};

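/*
 * Bring the SMMU to a known state: clear recorded faults, invalidate all
 * SMRs, force every S2CR to bypass, disable all context banks and flush
 * the TLBs before writing the final sCR0 configuration.
 */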
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i = 0;
	u32 reg;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/* Mark all SMRn as invalid and all S2CRn as bypass */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(i));
		writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i));
	}

	/* Make sure all context banks are disabled and clear CB_FSR  */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, but bypass when no mapping is found */
	reg &= ~(sCR0_CLIENTPD | sCR0_USFCFG);

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	/* Push the button */
	arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

/* Decode the ID-register address-size encoding into a bit count */
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;

	dev_notice(smmu->dev, "probing hardware configuration...\n");

	/* Primecell ID */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_PIDR2);
	smmu->version = ((id >> PIDR2_ARCH_SHIFT) & PIDR2_ARCH_MASK) + 1;
	dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
#ifndef CONFIG_64BIT
	if (((id >> ID0_PTFS_SHIFT) & ID0_PTFS_MASK) == ID0_PTFS_V8_ONLY) {
		dev_err(smmu->dev, "\tno v7 descriptor support!\n");
		return -ENODEV;
	}
#endif
	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2 |
		 ARM_SMMU_FEAT_TRANS_NESTED))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if (id & ID0_CTTW) {
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
		dev_notice(smmu->dev, "\tcoherent table walk\n");
	}

	if (id & ID0_SMS) {
		u32 smr, sid, mask;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
					   ID0_NUMSMRG_MASK;
		if (smmu->num_mapping_groups == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
		smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));

		mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
		sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
		if ((mask & sid) != sid) {
			dev_err(smmu->dev,
				"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
				mask, sid);
			return -ENODEV;
		}

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups, mask 0x%x",
			   smmu->num_mapping_groups, mask);
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pagesize = (id & ID1_PAGESIZE) ? SZ_64K : SZ_4K;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= (smmu->pagesize << 1);
	if (smmu->size != size)
		dev_warn(smmu->dev, "SMMU address space size (0x%lx) differs "
			 "from mapped region size (0x%lx)!\n", size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) &
				     ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);

	/*
	 * Stage-1 output limited by stage-2 input size due to pgd
	 * allocation (PTRS_PER_PGD).
	 */
#ifdef CONFIG_64BIT
	smmu->s1_output_size = min((unsigned long)VA_BITS, size);
#else
	smmu->s1_output_size = min(32UL, size);
#endif

	/* The stage-2 output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->s2_output_size = min((unsigned long)PHYS_MASK_SHIFT, size);

	if (smmu->version == 1) {
		smmu->input_size = 32;
	} else {
#ifdef CONFIG_64BIT
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		size = min(VA_BITS, arm_smmu_id_size_to_bits(size));
#else
		size = 32;
#endif
		smmu->input_size = size;

		if ((PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) ||
		    (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) ||
		    (PAGE_SIZE != SZ_4K && PAGE_SIZE != SZ_64K)) {
			dev_err(smmu->dev, "CPU page size 0x%lx unsupported\n",
				PAGE_SIZE);
			return -ENODEV;
		}
	}

	dev_notice(smmu->dev,
		   "\t%lu-bit VA, %lu-bit IPA, %lu-bit PA\n",
		   smmu->input_size, smmu->s1_output_size, smmu->s2_output_size);
	return 0;
}

static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device_node *dev_node;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_args masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	i = 0;
	smmu->masters = RB_ROOT;
	while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
					   "#stream-id-cells", i,
					   &masterspec)) {
		err = register_smmu_master(smmu, dev, &masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec.np->name);
			goto out_put_masters;
		}

		i++;
	}
	dev_notice(dev, "registered %d master devices\n", i);

	if ((dev_node = of_parse_phandle(dev->of_node, "smmu-parent", 0)))
		smmu->parent_of_node = dev_node;

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		goto out_put_parent;

	parse_driver_options(smmu);

	if (smmu->version > 1 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_parent;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = request_irq(smmu->irqs[i],
				  arm_smmu_global_fault,
				  IRQF_SHARED,
				  "arm-smmu global fault",
				  smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_free_irqs;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	return 0;

out_free_irqs:
	while (i--)
		free_irq(smmu->irqs[i], smmu);

out_put_parent:
	if (smmu->parent_of_node)
		of_node_put(smmu->parent_of_node);

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master;
		master = container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	return err;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	if (smmu->parent_of_node)
		of_node_put(smmu->parent_of_node);

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master;
		master = container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	for (i = 0; i < smmu->num_global_irqs; ++i)
		free_irq(smmu->irqs[i], smmu);

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

#ifdef CONFIG_OF
static struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", },
	{ .compatible = "arm,smmu-v2", },
	{ .compatible = "arm,mmu-400", },
	{ .compatible = "arm,mmu-500", },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
#endif

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.owner		= THIS_MODULE,
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	int ret;

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type))
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
#endif

	return 0;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");