/*
 * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
18
Joerg Roedel8736b2c2011-11-24 16:21:52 +010019#include <linux/mmu_notifier.h>
Joerg Roedeled96f222011-11-23 17:30:39 +010020#include <linux/amd-iommu.h>
21#include <linux/mm_types.h>
Joerg Roedel8736b2c2011-11-24 16:21:52 +010022#include <linux/profile.h>
Joerg Roedele3c495c2011-11-09 12:31:15 +010023#include <linux/module.h>
Joerg Roedel2d5503b2011-11-24 10:41:57 +010024#include <linux/sched.h>
Joerg Roedeled96f222011-11-23 17:30:39 +010025#include <linux/iommu.h>
Joerg Roedel028eeac2011-11-24 12:48:13 +010026#include <linux/wait.h>
Joerg Roedeled96f222011-11-23 17:30:39 +010027#include <linux/pci.h>
28#include <linux/gfp.h>
29
Joerg Roedel028eeac2011-11-24 12:48:13 +010030#include "amd_iommu_types.h"
Joerg Roedeled96f222011-11-23 17:30:39 +010031#include "amd_iommu_proto.h"
Joerg Roedele3c495c2011-11-09 12:31:15 +010032
33MODULE_LICENSE("GPL v2");
34MODULE_AUTHOR("Joerg Roedel <joerg.roedel@amd.com>");
35
Joerg Roedeled96f222011-11-23 17:30:39 +010036#define MAX_DEVICES 0x10000
37#define PRI_QUEUE_SIZE 512
38
39struct pri_queue {
40 atomic_t inflight;
41 bool finish;
Joerg Roedel028eeac2011-11-24 12:48:13 +010042 int status;
Joerg Roedeled96f222011-11-23 17:30:39 +010043};

struct pasid_state {
	struct list_head list;			/* For global state-list */
	atomic_t count;				/* Reference count */
	unsigned mmu_notifier_count;		/* Counting nested mmu_notifier
						   calls */
	struct mm_struct *mm;			/* mm_struct for the faults */
	struct mmu_notifier mn;			/* mmu_notifier handle */
	struct pri_queue pri[PRI_QUEUE_SIZE];	/* PRI tag states */
	struct device_state *device_state;	/* Link to our device_state */
	int pasid;				/* PASID index */
	bool invalid;				/* Used during setup and
						   teardown of the pasid */
	spinlock_t lock;			/* Protect pri_queues and
						   mmu_notifier_count */
	wait_queue_head_t wq;			/* To wait for count == 0 */
};

struct device_state {
	struct list_head list;
	u16 devid;
	atomic_t count;
	struct pci_dev *pdev;
	struct pasid_state **states;
	struct iommu_domain *domain;
	int pasid_levels;
	int max_pasids;
	amd_iommu_invalid_ppr_cb inv_ppr_cb;
	amd_iommu_invalidate_ctx inv_ctx_cb;
	spinlock_t lock;
	wait_queue_head_t wq;
};

struct fault {
	struct work_struct work;
	struct device_state *dev_state;
	struct pasid_state *state;
	struct mm_struct *mm;
	u64 address;
	u16 devid;
	u16 pasid;
	u16 tag;
	u16 finish;
	u16 flags;
};

static LIST_HEAD(state_list);
static spinlock_t state_lock;

static struct workqueue_struct *iommu_wq;

static void free_pasid_states(struct device_state *dev_state);

static u16 device_id(struct pci_dev *pdev)
{
	u16 devid;

	devid = pdev->bus->number;
	devid = (devid << 8) | pdev->devfn;

	return devid;
}

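/*
 * Illustrative note (not from the original file): the packing above is the
 * standard PCI requester-ID layout - bus number in bits 15:8, devfn in bits
 * 7:0 - i.e. the same value computed by the PCI_DEVID() helper from
 * <linux/pci.h>. For example, device 00:02.0 (bus 0x00, devfn 0x10) yields
 * devid 0x0010.
 */
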
static struct device_state *__get_device_state(u16 devid)
{
	struct device_state *dev_state;

	list_for_each_entry(dev_state, &state_list, list) {
		if (dev_state->devid == devid)
			return dev_state;
	}

	return NULL;
}

static struct device_state *get_device_state(u16 devid)
{
	struct device_state *dev_state;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	dev_state = __get_device_state(devid);
	if (dev_state != NULL)
		atomic_inc(&dev_state->count);
	spin_unlock_irqrestore(&state_lock, flags);

	return dev_state;
}

static void free_device_state(struct device_state *dev_state)
{
	/*
	 * First detach device from domain - No more PRI requests will arrive
	 * from that device after it is unbound from the IOMMUv2 domain.
	 */
	iommu_detach_device(dev_state->domain, &dev_state->pdev->dev);

	/* Everything is down now, free the IOMMUv2 domain */
	iommu_domain_free(dev_state->domain);

	/* Finally get rid of the device-state */
	kfree(dev_state);
}

static void put_device_state(struct device_state *dev_state)
{
	if (atomic_dec_and_test(&dev_state->count))
		wake_up(&dev_state->wq);
}

/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
						  int pasid, bool alloc)
{
	struct pasid_state **root, **ptr;
	int level, index;

	level = dev_state->pasid_levels;
	root  = dev_state->states;

	while (true) {

		index = (pasid >> (9 * level)) & 0x1ff;
		ptr   = &root[index];

		if (level == 0)
			break;

		if (*ptr == NULL) {
			if (!alloc)
				return NULL;

			*ptr = (void *)get_zeroed_page(GFP_ATOMIC);
			if (*ptr == NULL)
				return NULL;
		}

		root   = (struct pasid_state **)*ptr;
		level -= 1;
	}

	return ptr;
}

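/*
 * Illustrative note (not from the original file): the states table is a
 * radix tree with 512 (2^9) slots per level, so each level consumes nine
 * bits of the PASID. With pasid_levels == 1, a lookup of pasid 0x1234 walks
 *
 *	level 1: index = (0x1234 >> 9) & 0x1ff = 0x09   (interior page)
 *	level 0: index =  0x1234       & 0x1ff = 0x034  (leaf slot)
 *
 * and returns a pointer to the leaf slot, allocating the interior page on
 * the way down when alloc is true.
 */
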
static int set_pasid_state(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	ret = -ENOMEM;
	if (ptr == NULL)
		goto out_unlock;

	ret = -ENOMEM;
	if (*ptr != NULL)
		goto out_unlock;

	*ptr = pasid_state;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void clear_pasid_state(struct device_state *dev_state, int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	if (ptr == NULL)
		goto out_unlock;

	*ptr = NULL;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);
}

static struct pasid_state *get_pasid_state(struct device_state *dev_state,
					   int pasid)
{
	struct pasid_state **ptr, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, false);

	if (ptr == NULL)
		goto out_unlock;

	ret = *ptr;
	if (ret)
		atomic_inc(&ret->count);

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void free_pasid_state(struct pasid_state *pasid_state)
{
	kfree(pasid_state);
}

static void put_pasid_state(struct pasid_state *pasid_state)
{
	if (atomic_dec_and_test(&pasid_state->count))
		wake_up(&pasid_state->wq);
}

static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);

	if (!atomic_dec_and_test(&pasid_state->count))
		schedule();

	finish_wait(&pasid_state->wq, &wait);
	free_pasid_state(pasid_state);
}

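/*
 * Illustrative note (not from the original file): put_pasid_state_wait()
 * registers on the wait queue *before* dropping its own reference, so a
 * concurrent put_pasid_state() that brings the count to zero cannot issue
 * its wake_up() in the window between the atomic_dec_and_test() and the
 * schedule() - the usual prepare_to_wait()/schedule()/finish_wait()
 * pattern for sleeping without missed wake-ups.
 */
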
static void unbind_pasid(struct pasid_state *pasid_state)
{
	struct iommu_domain *domain;

	domain = pasid_state->device_state->domain;

	/*
	 * Mark pasid_state as invalid; no more faults will be added to the
	 * work queue after this is visible everywhere.
	 */
	pasid_state->invalid = true;

	/* Make sure this is visible */
	smp_wmb();

	/* After this the device/pasid can't access the mm anymore */
	amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);

	/* Make sure no more pending faults are in the queue */
	flush_workqueue(iommu_wq);
}

static void free_pasid_states_level1(struct pasid_state **tbl)
{
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		free_page((unsigned long)tbl[i]);
	}
}

static void free_pasid_states_level2(struct pasid_state **tbl)
{
	struct pasid_state **ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		ptr = (struct pasid_state **)tbl[i];
		free_pasid_states_level1(ptr);
	}
}

static void free_pasid_states(struct device_state *dev_state)
{
	struct pasid_state *pasid_state;
	int i;

	for (i = 0; i < dev_state->max_pasids; ++i) {
		pasid_state = get_pasid_state(dev_state, i);
		if (pasid_state == NULL)
			continue;

		put_pasid_state(pasid_state);

		/*
		 * This will call the mn_release function and
		 * unbind the PASID
		 */
		mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

		put_pasid_state_wait(pasid_state); /* Reference taken in
						      amd_iommu_bind_pasid */

		/* Drop reference taken in amd_iommu_bind_pasid */
		put_device_state(dev_state);
	}

	if (dev_state->pasid_levels == 2)
		free_pasid_states_level2(dev_state->states);
	else if (dev_state->pasid_levels == 1)
		free_pasid_states_level1(dev_state->states);
	else if (dev_state->pasid_levels != 0)
		BUG();

	free_page((unsigned long)dev_state->states);
}

static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
{
	return container_of(mn, struct pasid_state, mn);
}

static void __mn_flush_page(struct mmu_notifier *mn,
			    unsigned long address)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address);
}

static int mn_clear_flush_young(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start,
				unsigned long end)
{
	for (; start < end; start += PAGE_SIZE)
		__mn_flush_page(mn, start);

	return 0;
}

static void mn_invalidate_page(struct mmu_notifier *mn,
			       struct mm_struct *mm,
			       unsigned long address)
{
	__mn_flush_page(mn, address);
}

static void mn_invalidate_range(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	if ((start ^ (end - 1)) < PAGE_SIZE)
		amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
				     start);
	else
		amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
}

static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	bool run_inv_ctx_cb;

	might_sleep();

	pasid_state    = mn_to_state(mn);
	dev_state      = pasid_state->device_state;
	run_inv_ctx_cb = !pasid_state->invalid;

	if (run_inv_ctx_cb && pasid_state->device_state->inv_ctx_cb)
		dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);

	unbind_pasid(pasid_state);
}

static struct mmu_notifier_ops iommu_mn = {
	.release		= mn_release,
	.clear_flush_young	= mn_clear_flush_young,
	.invalidate_page	= mn_invalidate_page,
	.invalidate_range	= mn_invalidate_range,
};

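/*
 * Illustrative note (not from the original file): each mmu_notifier
 * callback above maps a CPU page-table change onto an IOTLB invalidation
 * for the bound PASID. The single-page test in mn_invalidate_range(),
 *
 *	(start ^ (end - 1)) < PAGE_SIZE
 *
 * is true exactly when start and end - 1 agree in all bits above the page
 * offset, i.e. when the range lies within a single page; only then is the
 * cheaper per-page flush used instead of a full per-PASID TLB flush.
 */
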
static void set_pri_tag_status(struct pasid_state *pasid_state,
			       u16 tag, int status)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	pasid_state->pri[tag].status = status;
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void finish_pri_tag(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   u16 tag)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
	    pasid_state->pri[tag].finish) {
		amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
				       pasid_state->pri[tag].status, tag);
		pasid_state->pri[tag].finish = false;
		pasid_state->pri[tag].status = PPR_SUCCESS;
	}
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void handle_fault_error(struct fault *fault)
{
	int status;

	if (!fault->dev_state->inv_ppr_cb) {
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		return;
	}

	status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
					      fault->pasid,
					      fault->address,
					      fault->flags);
	switch (status) {
	case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
		set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
		break;
	case AMD_IOMMU_INV_PRI_RSP_INVALID:
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		break;
	case AMD_IOMMU_INV_PRI_RSP_FAIL:
		set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
		break;
	default:
		BUG();
	}
}

static void do_fault(struct work_struct *work)
{
	struct fault *fault = container_of(work, struct fault, work);
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	u64 address;
	int ret, write;

	write = !!(fault->flags & PPR_FAULT_WRITE);

	mm = fault->state->mm;
	address = fault->address;

	down_read(&mm->mmap_sem);
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start) {
		/* failed to get a vma in the right range */
		up_read(&mm->mmap_sem);
		handle_fault_error(fault);
		goto out;
	}

	ret = handle_mm_fault(mm, vma, address, write);
	if (ret & VM_FAULT_ERROR) {
		/* failed to service fault */
		up_read(&mm->mmap_sem);
		handle_fault_error(fault);
		goto out;
	}

	up_read(&mm->mmap_sem);

out:
	finish_pri_tag(fault->dev_state, fault->state, fault->tag);

	put_pasid_state(fault->state);

	kfree(fault);
}

static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
{
	struct amd_iommu_fault *iommu_fault;
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	unsigned long flags;
	struct fault *fault;
	bool finish;
	u16 tag;
	int ret;

	iommu_fault = data;
	tag         = iommu_fault->tag & 0x1ff;
	finish      = (iommu_fault->tag >> 9) & 1;

	ret = NOTIFY_DONE;
	dev_state = get_device_state(iommu_fault->device_id);
	if (dev_state == NULL)
		goto out;

	pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
	if (pasid_state == NULL || pasid_state->invalid) {
		/* We know the device but not the PASID -> send INVALID */
		amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out_drop_state;
	}

	spin_lock_irqsave(&pasid_state->lock, flags);
	atomic_inc(&pasid_state->pri[tag].inflight);
	if (finish)
		pasid_state->pri[tag].finish = true;
	spin_unlock_irqrestore(&pasid_state->lock, flags);

	fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
	if (fault == NULL) {
		/* We are OOM - send success and let the device re-fault */
		finish_pri_tag(dev_state, pasid_state, tag);
		goto out_drop_state;
	}

	fault->dev_state = dev_state;
	fault->address   = iommu_fault->address;
	fault->state     = pasid_state;
	fault->tag       = tag;
	fault->finish    = finish;
	fault->pasid     = iommu_fault->pasid;
	fault->flags     = iommu_fault->flags;
	INIT_WORK(&fault->work, do_fault);

	queue_work(iommu_wq, &fault->work);

	ret = NOTIFY_OK;

out_drop_state:

	if (ret != NOTIFY_OK && pasid_state)
		put_pasid_state(pasid_state);

	put_device_state(dev_state);

out:
	return ret;
}

static struct notifier_block ppr_nb = {
	.notifier_call = ppr_notifier,
};

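/*
 * Illustrative note (not from the original file): in ppr_notifier() above,
 * the reported tag field carries the PRI tag in bits 8:0 and the
 * "last request in group" flag in bit 9. For example, iommu_fault->tag ==
 * 0x2a3 decodes to tag 0x0a3 with finish == 1, meaning a PPR completion
 * must be sent once all in-flight faults for that tag have been handled.
 */
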
int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
			 struct task_struct *task)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	struct mm_struct *mm;
	u16 devid;
	int ret;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid     = device_id(pdev);
	dev_state = get_device_state(devid);

	if (dev_state == NULL)
		return -EINVAL;

	ret = -EINVAL;
	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	ret = -ENOMEM;
	pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
	if (pasid_state == NULL)
		goto out;

	atomic_set(&pasid_state->count, 1);
	init_waitqueue_head(&pasid_state->wq);
	spin_lock_init(&pasid_state->lock);

	mm                        = get_task_mm(task);
	pasid_state->mm           = mm;
	pasid_state->device_state = dev_state;
	pasid_state->pasid        = pasid;
	pasid_state->invalid      = true; /* Mark as valid only if we are
					     done with setting up the pasid */
	pasid_state->mn.ops       = &iommu_mn;

	if (pasid_state->mm == NULL)
		goto out_free;

	mmu_notifier_register(&pasid_state->mn, mm);

	ret = set_pasid_state(dev_state, pasid_state, pasid);
	if (ret)
		goto out_unregister;

	ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
					__pa(pasid_state->mm->pgd));
	if (ret)
		goto out_clear_state;

	/* Now we are ready to handle faults */
	pasid_state->invalid = false;

	/*
	 * Drop the reference to the mm_struct here. We rely on the
	 * mmu_notifier release call-back to inform us when the mm
	 * is going away.
	 */
	mmput(mm);

	return 0;

out_clear_state:
	clear_pasid_state(dev_state, pasid);

out_unregister:
	mmu_notifier_unregister(&pasid_state->mn, mm);

out_free:
	/* mm is NULL when the task has no address space; skip mmput then */
	if (mm)
		mmput(mm);
	free_pasid_state(pasid_state);

out:
	put_device_state(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);

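/*
 * Illustrative usage sketch (not part of this file; the caller names are
 * hypothetical): a device driver wanting IOMMUv2 demand paging for a user
 * context would pair the exported bind/unbind calls roughly like this,
 * assuming "my_pdev" was already set up with amd_iommu_init_device():
 *
 *	int my_driver_attach_ctx(struct pci_dev *my_pdev, int pasid)
 *	{
 *		int ret;
 *
 *		ret = amd_iommu_bind_pasid(my_pdev, pasid, current);
 *		if (ret)
 *			return ret;
 *
 *		(the device may now fault on current->mm via this PASID)
 *		return 0;
 *	}
 *
 *	void my_driver_detach_ctx(struct pci_dev *my_pdev, int pasid)
 *	{
 *		amd_iommu_unbind_pasid(my_pdev, pasid);
 *	}
 */
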
void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);
	dev_state = get_device_state(devid);
	if (dev_state == NULL)
		return;

	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	pasid_state = get_pasid_state(dev_state, pasid);
	if (pasid_state == NULL)
		goto out;
	/*
	 * Drop reference taken here. We are safe because we still hold
	 * the reference taken in the amd_iommu_bind_pasid function.
	 */
	put_pasid_state(pasid_state);

	/* Clear the pasid state so that the pasid can be re-used */
	clear_pasid_state(dev_state, pasid_state->pasid);

	/*
	 * Call mmu_notifier_unregister to drop our reference
	 * to pasid_state->mm
	 */
	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

	put_pasid_state_wait(pasid_state); /* Reference taken in
					      amd_iommu_bind_pasid */
out:
	/* Drop reference taken in this function */
	put_device_state(dev_state);

	/* Drop reference taken in amd_iommu_bind_pasid */
	put_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_unbind_pasid);

int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
	struct device_state *dev_state;
	unsigned long flags;
	int ret, tmp;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	if (pasids <= 0 || pasids > (PASID_MASK + 1))
		return -EINVAL;

	devid = device_id(pdev);

	dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
	if (dev_state == NULL)
		return -ENOMEM;

	spin_lock_init(&dev_state->lock);
	init_waitqueue_head(&dev_state->wq);
	dev_state->pdev  = pdev;
	dev_state->devid = devid;

	tmp = pasids;
	for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
		dev_state->pasid_levels += 1;

	atomic_set(&dev_state->count, 1);
	dev_state->max_pasids = pasids;

	ret = -ENOMEM;
	dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
	if (dev_state->states == NULL)
		goto out_free_dev_state;

	dev_state->domain = iommu_domain_alloc(&pci_bus_type);
	if (dev_state->domain == NULL)
		goto out_free_states;

	amd_iommu_domain_direct_map(dev_state->domain);

	ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
	if (ret)
		goto out_free_domain;

	ret = iommu_attach_device(dev_state->domain, &pdev->dev);
	if (ret != 0)
		goto out_free_domain;

	spin_lock_irqsave(&state_lock, flags);

	if (__get_device_state(devid) != NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		ret = -EBUSY;
		goto out_free_domain;
	}

	list_add_tail(&dev_state->list, &state_list);

	spin_unlock_irqrestore(&state_lock, flags);

	return 0;

out_free_domain:
	iommu_domain_free(dev_state->domain);

out_free_states:
	free_page((unsigned long)dev_state->states);

out_free_dev_state:
	kfree(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_init_device);

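/*
 * Illustrative note (not from the original file): the pasid_levels loop
 * above computes how many interior levels the 512-ary PASID table needs.
 * With pasids == 0x10000 (the PCIe maximum of 16 PASID bits) the loop runs
 * exactly once - (0x10000 - 1) & ~0x1ff is non-zero, then after tmp >>= 9,
 * (0x80 - 1) & ~0x1ff is zero - giving pasid_levels == 1, i.e. one interior
 * level plus the leaf level. For pasids <= 512 the loop never runs and the
 * root page itself is the leaf. Callers are expected to pair
 * amd_iommu_init_device() with amd_iommu_free_device() once all PASIDs are
 * unbound.
 */
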
void amd_iommu_free_device(struct pci_dev *pdev)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	dev_state = __get_device_state(devid);
	if (dev_state == NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		return;
	}

	list_del(&dev_state->list);

	spin_unlock_irqrestore(&state_lock, flags);

	/* Get rid of any remaining pasid states */
	free_pasid_states(dev_state);

	put_device_state(dev_state);
	/*
	 * Wait until the last reference is dropped before freeing
	 * the device state.
	 */
	wait_event(dev_state->wq, !atomic_read(&dev_state->count));
	free_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);

int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
				 amd_iommu_invalid_ppr_cb cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ppr_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);

int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
				    amd_iommu_invalidate_ctx cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ctx_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);

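/*
 * Illustrative usage sketch (not part of this file; the callback bodies are
 * hypothetical, and the signatures assume the amd_iommu_invalid_ppr_cb and
 * amd_iommu_invalidate_ctx typedefs from <linux/amd-iommu.h>): a driver can
 * hook both notification paths after amd_iommu_init_device(). The
 * inv_ppr_cb return value selects the PPR completion sent from
 * handle_fault_error(); inv_ctx_cb is called from mn_release() when the
 * bound mm goes away.
 *
 *	static int my_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
 *				     unsigned long address, u16 flags)
 *	{
 *		(drop device-side state for the failed access here)
 *		return AMD_IOMMU_INV_PRI_RSP_INVALID;
 *	}
 *
 *	static void my_invalidate_ctx_cb(struct pci_dev *pdev, int pasid)
 *	{
 *		(stop the device from using this PASID here)
 *	}
 *
 *	amd_iommu_set_invalid_ppr_cb(pdev, my_invalid_ppr_cb);
 *	amd_iommu_set_invalidate_ctx_cb(pdev, my_invalidate_ctx_cb);
 */
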
static int __init amd_iommu_v2_init(void)
{
	int ret;

	pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n");

	if (!amd_iommu_v2_supported()) {
		pr_info("AMD IOMMUv2 functionality not available on this system\n");
		/*
		 * Load anyway to provide the symbols to other modules
		 * which may use AMD IOMMUv2 optionally.
		 */
		return 0;
	}

	spin_lock_init(&state_lock);

	ret = -ENOMEM;
	iommu_wq = create_workqueue("amd_iommu_v2");
	if (iommu_wq == NULL)
		goto out;

	amd_iommu_register_ppr_notifier(&ppr_nb);

	return 0;

out:
	return ret;
}

static void __exit amd_iommu_v2_exit(void)
{
	struct device_state *dev_state;
	int i;

	if (!amd_iommu_v2_supported())
		return;

	amd_iommu_unregister_ppr_notifier(&ppr_nb);

	flush_workqueue(iommu_wq);

	/*
	 * The loop below might call flush_workqueue(), so call
	 * destroy_workqueue() after it
	 */
	for (i = 0; i < MAX_DEVICES; ++i) {
		dev_state = get_device_state(i);

		if (dev_state == NULL)
			continue;

		WARN_ON_ONCE(1);

		put_device_state(dev_state);
		amd_iommu_free_device(dev_state->pdev);
	}

	destroy_workqueue(iommu_wq);
}

module_init(amd_iommu_v2_init);
module_exit(amd_iommu_v2_exit);