/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This file is released under the GPLv2.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include "iova.h"

void
init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
{
	spin_lock_init(&iovad->iova_alloc_lock);
	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
	iovad->dma_32bit_pfn = pfn_32bit;
}
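
/*
 * Example (illustrative sketch, not part of this file): a caller would
 * typically set up one iova_domain per protection domain at init time and
 * tear it down with put_iova_domain() on exit.  DMA_32BIT_PFN is an assumed
 * caller-side constant naming the highest 32-bit addressable page frame.
 *
 *	struct iova_domain iovad;
 *
 *	init_iova_domain(&iovad, DMA_32BIT_PFN);
 *	...
 *	put_iova_domain(&iovad);
 */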

static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	if ((*limit_pfn != iovad->dma_32bit_pfn) ||
		(iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			container_of(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo - 1;
		return prev_node;
	}
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	if (limit_pfn != iovad->dma_32bit_pfn)
		return;
	iovad->cached32_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = container_of(curr, struct iova, node);

	if (free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached32_node = rb_next(&free->node);
}

/* Computes the padding size required to make the
 * start address naturally aligned on its size
 */
static int
iova_get_pad_size(int size, unsigned int limit_pfn)
{
	unsigned int pad_size = 0;
	unsigned int order = ilog2(size);

	if (order)
		pad_size = (limit_pfn + 1) % (1 << order);

	return pad_size;
}
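
/*
 * Worked example (illustrative, assuming the allocator below): for a
 * size-aligned request of size = 16 pfns with limit_pfn = 0x4effa,
 * order = 4 and pad_size = 0x4effb % 16 = 11, so the 11 pfns just below
 * limit_pfn are skipped and pfn_lo = 0x4effa - (16 + 11) + 1 = 0x4efe0,
 * which is 16-pfn aligned.  When limit_pfn + 1 is already a multiple of
 * the size (e.g. limit_pfn = 0x4efff), pad_size is 0.
 */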

static int __alloc_iova_range(struct iova_domain *iovad, unsigned long size,
		unsigned long limit_pfn, struct iova *new, bool size_aligned)
{
	struct rb_node *curr = NULL;
	unsigned long flags;
	unsigned long saved_pfn;
	unsigned int pad_size = 0;

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	while (curr) {
		struct iova *curr_iova = container_of(curr, struct iova, node);
		if (limit_pfn < curr_iova->pfn_lo)
			goto move_left;
		else if (limit_pfn < curr_iova->pfn_hi)
			goto adjust_limit_pfn;
		else {
			if (size_aligned)
				pad_size = iova_get_pad_size(size, limit_pfn);
			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
				break;	/* found a free slot */
		}
adjust_limit_pfn:
		limit_pfn = curr_iova->pfn_lo - 1;
move_left:
		curr = rb_prev(curr);
	}

	if (!curr) {
		if (size_aligned)
			pad_size = iova_get_pad_size(size, limit_pfn);
		if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			return -ENOMEM;
		}
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
	new->pfn_hi = new->pfn_lo + size - 1;

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return 0;
}

static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = container_of(*new, struct iova, node);
		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else
			BUG(); /* this should not happen */
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

/**
 * alloc_iova - allocates an iova
 * @iovad: iova domain in question
 * @size: size of page frames to allocate
 * @limit_pfn: max limit address
 * @size_aligned: set if a size-aligned address range is required
 * This function allocates an iova in the range IOVA_START_PFN to limit_pfn,
 * searching downward from limit_pfn rather than upward from IOVA_START_PFN.
 * If the size_aligned flag is set then the allocated address iova->pfn_lo
 * will be naturally aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	unsigned long flags;
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	/* If size_aligned is set then round the size up to
	 * the next power of two.
	 */
	if (size_aligned)
		size = __roundup_pow_of_two(size);

	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
	ret = __alloc_iova_range(iovad, size, limit_pfn, new_iova,
			size_aligned);

	if (ret) {
		spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
		free_iova_mem(new_iova);
		return NULL;
	}

	/* Insert the new_iova into domain rbtree by holding writer lock */
	spin_lock(&iovad->iova_rbtree_lock);
	iova_insert_rbtree(&iovad->rbroot, new_iova);
	__cached_rbnode_insert_update(iovad, limit_pfn, new_iova);
	spin_unlock(&iovad->iova_rbtree_lock);

	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);

	return new_iova;
}
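
/*
 * Usage sketch (illustrative, not part of this file): allocate a
 * size-aligned range of nrpages page frames below a 32-bit limit, program
 * the IOMMU page tables for piova->pfn_lo .. piova->pfn_hi, and release
 * the range with __free_iova() when the mapping is torn down.
 * domain->iovad, nrpages and DMA_32BIT_PFN are assumed caller state, not
 * defined in this file.
 *
 *	struct iova *piova;
 *
 *	piova = alloc_iova(&domain->iovad, nrpages, DMA_32BIT_PFN, 1);
 *	if (!piova)
 *		return NULL;
 *	...
 *	__free_iova(&domain->iovad, piova);
 */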

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: iova domain in question.
 * @pfn: page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct rb_node *node;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = iovad->rbroot.rb_node;
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		/* If pfn falls within iova's range, return iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			/* We are not holding the lock while this iova
			 * is referenced by the caller, as the same thread
			 * which called this function also calls __free_iova()
			 * and it is by design that only one thread can
			 * possibly reference a particular iova, hence there
			 * is no conflict.
			 */
			return iova;
		}

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_lo)
			node = node->rb_right;
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return NULL;
}

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: iova domain in question.
 * @pfn: pfn that was allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);
	if (iova)
		__free_iova(iovad, iova);
}

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);
		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}

static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = container_of(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (!iova)
		return iova;

	iova->pfn_hi = pfn_hi;
	iova->pfn_lo = pfn_lo;
	iova_insert_rbtree(&iovad->rbroot, iova);
	return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: iova domain pointer
 * @pfn_lo: lower page frame address
 * @pfn_hi: higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this range is not handed out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
	spin_lock(&iovad->iova_rbtree_lock);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = container_of(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;

		} else if (overlap)
			break;
	}

359 /* We are here either becasue this is the first reserver node
360 * or need to insert remaining non overlap addr range
361 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock(&iovad->iova_rbtree_lock);
	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
	return iova;
}

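/*
 * Usage sketch (illustrative): carve a fixed MMIO window, such as the
 * IOAPIC range, out of a domain so that alloc_iova() never hands it out.
 * IOVA_PFN(), IOAPIC_RANGE_START/IOAPIC_RANGE_END and reserved_iova_list
 * are assumed caller-side helpers and state, not defined in this file.
 *
 *	if (!reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
 *			  IOVA_PFN(IOAPIC_RANGE_END)))
 *		printk(KERN_ERR "Reserve IOAPIC range failed\n");
 */
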
/**
 * copy_reserved_iova - copies the reserved ranges between domains
 * @from: source domain from which to copy
 * @to: destination domain to which to copy
 * This function copies reserved iovas from one domain to
 * another.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_alloc_lock, flags);
	spin_lock(&from->iova_rbtree_lock);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = container_of(node, struct iova, node);
		struct iova *new_iova;
		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
				iova->pfn_lo, iova->pfn_hi);
	}
	spin_unlock(&from->iova_rbtree_lock);
	spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
}
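
/*
 * Usage sketch (illustrative): when a new protection domain is created, a
 * caller would typically mirror its globally reserved ranges into the new
 * domain before handing it to alloc_iova().  reserved_iova_list and domain
 * are assumed caller state, not defined in this file.
 *
 *	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
 */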