#ifndef __IO_PGTABLE_H
#define __IO_PGTABLE_H
#include <linux/bitops.h>

/*
 * Public API for use by IOMMU drivers
 */
enum io_pgtable_fmt {
        ARM_32_LPAE_S1,
        ARM_32_LPAE_S2,
        ARM_64_LPAE_S1,
        ARM_64_LPAE_S2,
        ARM_V7S,
        IO_PGTABLE_NUM_FMTS,
};

/**
 * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
 *
 * @tlb_flush_all: Synchronously invalidate the entire TLB context.
 * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
 * @tlb_sync:      Ensure any queued TLB invalidation has taken effect, and
 *                 any corresponding page table updates are visible to the
 *                 IOMMU.
 *
 * Note that these can all be called in atomic context and must therefore
 * not block.
 */
struct iommu_gather_ops {
        void (*tlb_flush_all)(void *cookie);
        void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
                              bool leaf, void *cookie);
        void (*tlb_sync)(void *cookie);
};
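
/*
 * A minimal sketch (illustrative only, not part of this API) of how an IOMMU
 * driver might provide these callbacks. The my_smmu_* names and the
 * struct my_smmu_domain type are hypothetical stand-ins for driver code that
 * issues the real invalidation commands to the hardware:
 *
 *      static void my_smmu_tlb_flush_all(void *cookie)
 *      {
 *              struct my_smmu_domain *dom = cookie;
 *
 *              my_smmu_invalidate_context(dom);        // hypothetical hw op
 *      }
 *
 *      static void my_smmu_tlb_add_flush(unsigned long iova, size_t size,
 *                                        size_t granule, bool leaf,
 *                                        void *cookie)
 *      {
 *              struct my_smmu_domain *dom = cookie;
 *
 *              // Queue range invalidations; they need not have completed
 *              // until tlb_sync() is called.
 *              my_smmu_queue_inv_range(dom, iova, size, granule, leaf);
 *      }
 *
 *      static void my_smmu_tlb_sync(void *cookie)
 *      {
 *              struct my_smmu_domain *dom = cookie;
 *
 *              my_smmu_wait_for_inv(dom);              // hypothetical hw op
 *      }
 *
 *      static const struct iommu_gather_ops my_smmu_gather_ops = {
 *              .tlb_flush_all  = my_smmu_tlb_flush_all,
 *              .tlb_add_flush  = my_smmu_tlb_add_flush,
 *              .tlb_sync       = my_smmu_tlb_sync,
 *      };
 */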

/**
 * struct io_pgtable_cfg - Configuration data for a set of page tables.
 *
 * @quirks:        A bitmap of hardware quirks that require some special
 *                 action by the low-level page table allocator.
 * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
 *                 tables.
 * @ias:           Input address (iova) size, in bits.
 * @oas:           Output address (paddr) size, in bits.
 * @tlb:           TLB management callbacks for this set of tables.
 * @iommu_dev:     The device representing the DMA configuration for the
 *                 page table walker.
 */
struct io_pgtable_cfg {
        #define IO_PGTABLE_QUIRK_ARM_NS         BIT(0)  /* Set NS bit in PTEs */
        #define IO_PGTABLE_QUIRK_NO_PERMS       BIT(1)  /* No AP/XN bits */
        #define IO_PGTABLE_QUIRK_TLBI_ON_MAP    BIT(2)  /* TLB Inv. on map */
        int                             quirks;
        unsigned long                   pgsize_bitmap;
        unsigned int                    ias;
        unsigned int                    oas;
        const struct iommu_gather_ops   *tlb;
        struct device                   *iommu_dev;

        /* Low-level data specific to the table format */
        union {
                struct {
                        u64     ttbr[2];
                        u64     tcr;
                        u64     mair[2];
                } arm_lpae_s1_cfg;

                struct {
                        u64     vttbr;
                        u64     vtcr;
                } arm_lpae_s2_cfg;

                struct {
                        u32     ttbr[2];
                        u32     tcr;
                        u32     nmrr;
                        u32     prrr;
                } arm_v7s_cfg;
        };
};
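
/*
 * A minimal sketch (illustrative only) of how a driver for hardware with a
 * 48-bit input and output address space might describe itself before handing
 * the configuration to alloc_io_pgtable_ops() below. The my_smmu_gather_ops
 * and smmu->dev names are hypothetical (see the sketch above), and the values
 * shown are examples rather than requirements:
 *
 *      struct io_pgtable_cfg cfg = {
 *              .quirks         = 0,
 *              .pgsize_bitmap  = SZ_4K | SZ_2M | SZ_1G,
 *              .ias            = 48,
 *              .oas            = 48,
 *              .tlb            = &my_smmu_gather_ops,
 *              .iommu_dev      = smmu->dev,
 *      };
 *
 * After a successful ARM_64_LPAE_S1 allocation, the values the driver must
 * program into the hardware (TTBR, TCR, MAIR) are returned in
 * cfg.arm_lpae_s1_cfg.
 */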

/**
 * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
 *
 * @map:          Map a physically contiguous memory region.
 * @unmap:        Unmap a physically contiguous memory region.
 * @iova_to_phys: Translate iova to physical address.
 *
 * These functions map directly onto the iommu_ops member functions with
 * the same names.
 */
struct io_pgtable_ops {
        int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
                   phys_addr_t paddr, size_t size, int prot);
        int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
                     size_t size);
        phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
                                    unsigned long iova);
};

/**
 * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
 *
 * @fmt:    The page table format.
 * @cfg:    The page table configuration. This will be modified to represent
 *          the configuration actually provided by the allocator (e.g. the
 *          pgsize_bitmap may be restricted).
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          the callback routines in cfg->tlb.
 */
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
                                            struct io_pgtable_cfg *cfg,
                                            void *cookie);

/**
 * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
 *                         *must* ensure that the page table is no longer
 *                         live, but the TLB can be dirty.
 *
 * @ops: The ops returned from alloc_io_pgtable_ops.
 */
void free_io_pgtable_ops(struct io_pgtable_ops *ops);
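
/*
 * Putting the public API together, a minimal sketch (illustrative only) of
 * the lifecycle an IOMMU driver goes through. The cfg is assumed to have been
 * filled in as in the sketch above; "domain", "iova" and "paddr" are
 * hypothetical driver-owned values, and IOMMU_READ/IOMMU_WRITE come from
 * <linux/iommu.h>:
 *
 *      struct io_pgtable_ops *ops;
 *      phys_addr_t phys;
 *      int ret;
 *
 *      ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, domain);
 *      if (!ops)
 *              return -ENOMEM;
 *
 *      // Map a 2MB physically contiguous region with RW permissions.
 *      ret = ops->map(ops, iova, paddr, SZ_2M, IOMMU_READ | IOMMU_WRITE);
 *
 *      // Look up the translation that was just installed.
 *      phys = ops->iova_to_phys(ops, iova);
 *
 *      // Tear down: unmap (which invalidates the TLB via cfg.tlb), then
 *      // free the tables once nothing can walk them any more.
 *      ops->unmap(ops, iova, SZ_2M);
 *      free_io_pgtable_ops(ops);
 */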

/*
 * Internal structures for page table allocator implementations.
 */

/**
 * struct io_pgtable - Internal structure describing a set of page tables.
 *
 * @fmt:              The page table format.
 * @cookie:           An opaque token provided by the IOMMU driver and passed
 *                    back to any callback routines.
 * @tlb_sync_pending: Private flag for optimising out redundant syncs.
 * @cfg:              A copy of the page table configuration.
 * @ops:              The page table operations in use for this set of page
 *                    tables.
 */
struct io_pgtable {
        enum io_pgtable_fmt     fmt;
        void                    *cookie;
        bool                    tlb_sync_pending;
        struct io_pgtable_cfg   cfg;
        struct io_pgtable_ops   ops;
};

#define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)
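
/*
 * Format implementations embed struct io_pgtable in their own private state
 * and hand out &iop->ops; the macro above recovers the io_pgtable (and hence
 * the cfg and cookie) from the ops pointer callers pass back in. A sketch,
 * with my_fmt_map as a hypothetical example rather than a function in this
 * file:
 *
 *      static int my_fmt_map(struct io_pgtable_ops *ops, unsigned long iova,
 *                            phys_addr_t paddr, size_t size, int prot)
 *      {
 *              struct io_pgtable *iop = io_pgtable_ops_to_pgtable(ops);
 *
 *              // ... install the PTEs described by iop->cfg ...
 *      }
 */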

static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
{
        iop->cfg.tlb->tlb_flush_all(iop->cookie);
        iop->tlb_sync_pending = true;
}

static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
                unsigned long iova, size_t size, size_t granule, bool leaf)
{
        iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
        iop->tlb_sync_pending = true;
}

static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
{
        if (iop->tlb_sync_pending) {
                iop->cfg.tlb->tlb_sync(iop->cookie);
                iop->tlb_sync_pending = false;
        }
}

/**
 * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
 *                              particular format.
 *
 * @alloc: Allocate a set of page tables described by cfg.
 * @free:  Free the page tables associated with iop.
 */
struct io_pgtable_init_fns {
        struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
        void (*free)(struct io_pgtable *iop);
};
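
/*
 * A new page table format hooks itself up by exporting an io_pgtable_init_fns
 * instance (like the ones below) and adding it to the per-format lookup table
 * in io-pgtable.c, indexed by its io_pgtable_fmt value. A rough sketch, with
 * my_fmt_* as hypothetical placeholders:
 *
 *      static struct io_pgtable *my_fmt_alloc(struct io_pgtable_cfg *cfg,
 *                                             void *cookie)
 *      {
 *              // Validate/restrict cfg, allocate the root table and the
 *              // containing struct io_pgtable, fill in iop->ops ...
 *      }
 *
 *      static void my_fmt_free(struct io_pgtable *iop)
 *      {
 *              // Walk and free the tables, then free the structure itself.
 *      }
 *
 *      struct io_pgtable_init_fns io_pgtable_my_fmt_init_fns = {
 *              .alloc  = my_fmt_alloc,
 *              .free   = my_fmt_free,
 *      };
 */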
181
Joerg Roedel2e169bb2015-08-13 12:01:10 +0200182extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
183extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
184extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
185extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
Robin Murphye5fc9752016-01-26 17:13:13 +0000186extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
Joerg Roedel2e169bb2015-08-13 12:01:10 +0200187
Will Deaconfdb1d7b2014-11-14 17:16:49 +0000188#endif /* __IO_PGTABLE_H */