/*
 * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/mmu_notifier.h>
#include <linux/amd-iommu.h>
#include <linux/mm_types.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/iommu.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>

#include "amd_iommu_types.h"
#include "amd_iommu_proto.h"

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <joerg.roedel@amd.com>");

#define MAX_DEVICES		0x10000
#define PRI_QUEUE_SIZE		512

struct pri_queue {
	atomic_t inflight;
	bool finish;
	int status;
};

struct pasid_state {
	struct list_head list;			/* For global state-list */
	atomic_t count;				/* Reference count */
	struct task_struct *task;		/* Task bound to this PASID */
	struct mm_struct *mm;			/* mm_struct for the faults */
	struct mmu_notifier mn;			/* mmu_notifier handle */
	struct pri_queue pri[PRI_QUEUE_SIZE];	/* PRI tag states */
	struct device_state *device_state;	/* Link to our device_state */
	int pasid;				/* PASID index */
	spinlock_t lock;			/* Protect pri_queues */
	wait_queue_head_t wq;			/* To wait for count == 0 */
};

struct device_state {
	struct list_head list;
	u16 devid;
	atomic_t count;
	struct pci_dev *pdev;
	struct pasid_state **states;
	struct iommu_domain *domain;
	int pasid_levels;
	int max_pasids;
	amd_iommu_invalid_ppr_cb inv_ppr_cb;
	amd_iommu_invalidate_ctx inv_ctx_cb;
	spinlock_t lock;
	wait_queue_head_t wq;
};

struct fault {
	struct work_struct work;
	struct device_state *dev_state;
	struct pasid_state *state;
	struct mm_struct *mm;
	u64 address;
	u16 devid;
	u16 pasid;
	u16 tag;
	u16 finish;
	u16 flags;
};

static LIST_HEAD(state_list);
static spinlock_t state_lock;

/* List and lock for all pasid_states */
static LIST_HEAD(pasid_state_list);
static DEFINE_SPINLOCK(ps_lock);

static struct workqueue_struct *iommu_wq;

/*
 * Empty page table - Used between
 * mmu_notifier_invalidate_range_start and
 * mmu_notifier_invalidate_range_end
 */
static u64 *empty_page_table;

static void free_pasid_states(struct device_state *dev_state);
static void unbind_pasid(struct device_state *dev_state, int pasid);

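/*
 * devid is the PCI requester ID in the usual bus[15:8]/devfn[7:0]
 * encoding - e.g. bus 0x01, devfn 0x10 gives devid 0x0110. This is the
 * same encoding ppr_notifier() receives in amd_iommu_fault->device_id,
 * which is why it can be used directly as the lookup key.
 */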
static u16 device_id(struct pci_dev *pdev)
{
	u16 devid;

	devid = pdev->bus->number;
	devid = (devid << 8) | pdev->devfn;

	return devid;
}

static struct device_state *__get_device_state(u16 devid)
{
	struct device_state *dev_state;

	list_for_each_entry(dev_state, &state_list, list) {
		if (dev_state->devid == devid)
			return dev_state;
	}

	return NULL;
}

static struct device_state *get_device_state(u16 devid)
{
	struct device_state *dev_state;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	dev_state = __get_device_state(devid);
	if (dev_state != NULL)
		atomic_inc(&dev_state->count);
	spin_unlock_irqrestore(&state_lock, flags);

	return dev_state;
}

static void free_device_state(struct device_state *dev_state)
{
	/*
	 * First detach device from domain - No more PRI requests will arrive
	 * from that device after it is unbound from the IOMMUv2 domain.
	 */
	iommu_detach_device(dev_state->domain, &dev_state->pdev->dev);

	/* Everything is down now, free the IOMMUv2 domain */
	iommu_domain_free(dev_state->domain);

	/* Finally get rid of the device-state */
	kfree(dev_state);
}

static void put_device_state(struct device_state *dev_state)
{
	if (atomic_dec_and_test(&dev_state->count))
		wake_up(&dev_state->wq);
}

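/*
 * Drop a reference and sleep until every other holder is gone, then
 * free the device_state. The wake-up comes from put_device_state()
 * when the count reaches zero.
 */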
static void put_device_state_wait(struct device_state *dev_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&dev_state->wq, &wait, TASK_UNINTERRUPTIBLE);
	if (!atomic_dec_and_test(&dev_state->count))
		schedule();
	finish_wait(&dev_state->wq, &wait);

	free_device_state(dev_state);
}

static void link_pasid_state(struct pasid_state *pasid_state)
{
	spin_lock(&ps_lock);
	list_add_tail(&pasid_state->list, &pasid_state_list);
	spin_unlock(&ps_lock);
}

static void __unlink_pasid_state(struct pasid_state *pasid_state)
{
	list_del(&pasid_state->list);
}

static void unlink_pasid_state(struct pasid_state *pasid_state)
{
	spin_lock(&ps_lock);
	__unlink_pasid_state(pasid_state);
	spin_unlock(&ps_lock);
}

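/*
 * pasid_state pointers live in a multi-level table with
 * dev_state->pasid_levels levels and 512 (2^9) slots per level, so each
 * level consumes 9 bits of the PASID. A worked example with illustrative
 * values: for pasid_levels == 1 and pasid == 0x1234, the level-1 slot is
 * (0x1234 >> 9) & 0x1ff == 9 and the level-0 slot is 0x1234 & 0x1ff == 52.
 * Intermediate tables are allocated on demand when 'alloc' is true.
 */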
/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
						  int pasid, bool alloc)
{
	struct pasid_state **root, **ptr;
	int level, index;

	level = dev_state->pasid_levels;
	root = dev_state->states;

	while (true) {

		index = (pasid >> (9 * level)) & 0x1ff;
		ptr = &root[index];

		if (level == 0)
			break;

		if (*ptr == NULL) {
			if (!alloc)
				return NULL;

			*ptr = (void *)get_zeroed_page(GFP_ATOMIC);
			if (*ptr == NULL)
				return NULL;
		}

		root = (struct pasid_state **)*ptr;
		level -= 1;
	}

	return ptr;
}

static int set_pasid_state(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	ret = -ENOMEM;
	if (ptr == NULL)
		goto out_unlock;

	ret = -ENOMEM;
	if (*ptr != NULL)
		goto out_unlock;

	*ptr = pasid_state;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void clear_pasid_state(struct device_state *dev_state, int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	if (ptr == NULL)
		goto out_unlock;

	*ptr = NULL;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);
}

static struct pasid_state *get_pasid_state(struct device_state *dev_state,
					   int pasid)
{
	struct pasid_state **ptr, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, false);

	if (ptr == NULL)
		goto out_unlock;

	ret = *ptr;
	if (ret)
		atomic_inc(&ret->count);

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void free_pasid_state(struct pasid_state *pasid_state)
{
	kfree(pasid_state);
}

static void put_pasid_state(struct pasid_state *pasid_state)
{
	if (atomic_dec_and_test(&pasid_state->count)) {
		put_device_state(pasid_state->device_state);
		wake_up(&pasid_state->wq);
	}
}

static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);

	if (atomic_dec_and_test(&pasid_state->count))
		put_device_state(pasid_state->device_state);
	else
		schedule();

	finish_wait(&pasid_state->wq, &wait);
	mmput(pasid_state->mm);
	free_pasid_state(pasid_state);
}

static void __unbind_pasid(struct pasid_state *pasid_state)
{
	struct iommu_domain *domain;

	domain = pasid_state->device_state->domain;

	amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);
	clear_pasid_state(pasid_state->device_state, pasid_state->pasid);

	/* Make sure no more pending faults are in the queue */
	flush_workqueue(iommu_wq);

	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

	put_pasid_state(pasid_state); /* Reference taken in bind() function */
}

static void unbind_pasid(struct device_state *dev_state, int pasid)
{
	struct pasid_state *pasid_state;

	pasid_state = get_pasid_state(dev_state, pasid);
	if (pasid_state == NULL)
		return;

	unlink_pasid_state(pasid_state);
	__unbind_pasid(pasid_state);
	put_pasid_state_wait(pasid_state); /* Reference taken in this function */
}

static void free_pasid_states_level1(struct pasid_state **tbl)
{
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		free_page((unsigned long)tbl[i]);
	}
}

static void free_pasid_states_level2(struct pasid_state **tbl)
{
	struct pasid_state **ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		ptr = (struct pasid_state **)tbl[i];
		free_pasid_states_level1(ptr);
	}
}

static void free_pasid_states(struct device_state *dev_state)
{
	struct pasid_state *pasid_state;
	int i;

	for (i = 0; i < dev_state->max_pasids; ++i) {
		pasid_state = get_pasid_state(dev_state, i);
		if (pasid_state == NULL)
			continue;

		put_pasid_state(pasid_state);

		/*
		 * This will call the mn_release function and
		 * unbind the PASID
		 */
		mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
	}

	if (dev_state->pasid_levels == 2)
		free_pasid_states_level2(dev_state->states);
	else if (dev_state->pasid_levels == 1)
		free_pasid_states_level1(dev_state->states);
	else if (dev_state->pasid_levels != 0)
		BUG();

	free_page((unsigned long)dev_state->states);
}

static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
{
	return container_of(mn, struct pasid_state, mn);
}

static void __mn_flush_page(struct mmu_notifier *mn,
			    unsigned long address)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state = pasid_state->device_state;

	amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address);
}

static int mn_clear_flush_young(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long address)
{
	__mn_flush_page(mn, address);

	return 0;
}

static void mn_change_pte(struct mmu_notifier *mn,
			  struct mm_struct *mm,
			  unsigned long address,
			  pte_t pte)
{
	__mn_flush_page(mn, address);
}

static void mn_invalidate_page(struct mmu_notifier *mn,
			       struct mm_struct *mm,
			       unsigned long address)
{
	__mn_flush_page(mn, address);
}

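/*
 * While a range invalidation is in flight, the PASID's GCR3 root pointer
 * is redirected to the pre-allocated, all-zero empty_page_table, so any
 * DMA the device issues against the range faults instead of hitting
 * stale translations; mn_invalidate_range_end() switches the root back
 * to the real mm->pgd afterwards.
 */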
static void mn_invalidate_range_start(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state = pasid_state->device_state;

	amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
				  __pa(empty_page_table));
}

static void mn_invalidate_range_end(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state = pasid_state->device_state;

	amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
				  __pa(pasid_state->mm->pgd));
}

static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	might_sleep();

	pasid_state = mn_to_state(mn);
	dev_state = pasid_state->device_state;

	if (pasid_state->device_state->inv_ctx_cb)
		dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);

	unbind_pasid(dev_state, pasid_state->pasid);
}

static struct mmu_notifier_ops iommu_mn = {
	.release		= mn_release,
	.clear_flush_young	= mn_clear_flush_young,
	.change_pte		= mn_change_pte,
	.invalidate_page	= mn_invalidate_page,
	.invalidate_range_start	= mn_invalidate_range_start,
	.invalidate_range_end	= mn_invalidate_range_end,
};

static void set_pri_tag_status(struct pasid_state *pasid_state,
			       u16 tag, int status)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	pasid_state->pri[tag].status = status;
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

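/*
 * PRI tag accounting: ppr_notifier() bumps pri[tag].inflight once per
 * queued fault and sets pri[tag].finish when the IOMMU marks the last
 * request of a group. The PPR completion for the tag is only sent once
 * the final in-flight fault for that tag retires here.
 */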
static void finish_pri_tag(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   u16 tag)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
	    pasid_state->pri[tag].finish) {
		amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
				       pasid_state->pri[tag].status, tag);
		pasid_state->pri[tag].finish = false;
		pasid_state->pri[tag].status = PPR_SUCCESS;
	}
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

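/*
 * Worker for the deferred fault handling: resolve the faulting address
 * with get_user_pages() under mmap_sem. If that fails, fall back to the
 * driver's invalid-PPR callback (when one is registered) to pick the
 * PPR response code; otherwise answer PPR_INVALID.
 */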
static void do_fault(struct work_struct *work)
{
	struct fault *fault = container_of(work, struct fault, work);
	int npages, write;
	struct page *page;

	write = !!(fault->flags & PPR_FAULT_WRITE);

	down_read(&fault->state->mm->mmap_sem);
	npages = get_user_pages(fault->state->task, fault->state->mm,
				fault->address, 1, write, 0, &page, NULL);
	up_read(&fault->state->mm->mmap_sem);

	if (npages == 1) {
		put_page(page);
	} else if (fault->dev_state->inv_ppr_cb) {
		int status;

		status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
						      fault->pasid,
						      fault->address,
						      fault->flags);
		switch (status) {
		case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
			set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
			break;
		case AMD_IOMMU_INV_PRI_RSP_INVALID:
			set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
			break;
		case AMD_IOMMU_INV_PRI_RSP_FAIL:
			set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
			break;
		default:
			BUG();
		}
	} else {
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
	}

	finish_pri_tag(fault->dev_state, fault->state, fault->tag);

	put_pasid_state(fault->state);

	kfree(fault);
}

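/*
 * PPR (Peripheral Page Request) entry point. This is invoked from the
 * IOMMU driver's PPR-log handling path and must not sleep - note the
 * GFP_ATOMIC allocation - so the actual fault handling is handed off
 * to the iommu_wq workqueue.
 */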
static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
{
	struct amd_iommu_fault *iommu_fault;
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	unsigned long flags;
	struct fault *fault;
	bool finish;
	u16 tag;
	int ret;

	iommu_fault = data;
	tag = iommu_fault->tag & 0x1ff;
	finish = (iommu_fault->tag >> 9) & 1;

	ret = NOTIFY_DONE;
	dev_state = get_device_state(iommu_fault->device_id);
	if (dev_state == NULL)
		goto out;

	pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
	if (pasid_state == NULL) {
		/* We know the device but not the PASID -> send INVALID */
		amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out_drop_state;
	}

	spin_lock_irqsave(&pasid_state->lock, flags);
	atomic_inc(&pasid_state->pri[tag].inflight);
	if (finish)
		pasid_state->pri[tag].finish = true;
	spin_unlock_irqrestore(&pasid_state->lock, flags);

	fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
	if (fault == NULL) {
		/* We are OOM - send success and let the device re-fault */
		finish_pri_tag(dev_state, pasid_state, tag);
		goto out_drop_state;
	}

	fault->dev_state = dev_state;
	fault->address = iommu_fault->address;
	fault->state = pasid_state;
	fault->tag = tag;
	fault->finish = finish;
	fault->pasid = iommu_fault->pasid; /* needed by do_fault() for inv_ppr_cb */
	fault->flags = iommu_fault->flags;
	INIT_WORK(&fault->work, do_fault);

	queue_work(iommu_wq, &fault->work);

	ret = NOTIFY_OK;

out_drop_state:
	put_device_state(dev_state);

out:
	return ret;
}

static struct notifier_block ppr_nb = {
	.notifier_call = ppr_notifier,
};

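/*
 * Public API: bind a task's address space to a PASID on the given PCI
 * device. This registers an mmu_notifier on the task's mm and programs
 * the mm's page-table root into the IOMMU's GCR3 table for that PASID.
 */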
int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
			 struct task_struct *task)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	u16 devid;
	int ret;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);
	dev_state = get_device_state(devid);

	if (dev_state == NULL)
		return -EINVAL;

	ret = -EINVAL;
	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	ret = -ENOMEM;
	pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
	if (pasid_state == NULL)
		goto out;

	atomic_set(&pasid_state->count, 1);
	init_waitqueue_head(&pasid_state->wq);
	spin_lock_init(&pasid_state->lock);

	pasid_state->task = task;
	pasid_state->mm = get_task_mm(task);
	pasid_state->device_state = dev_state;
	pasid_state->pasid = pasid;
	pasid_state->mn.ops = &iommu_mn;

	if (pasid_state->mm == NULL)
		goto out_free;

	mmu_notifier_register(&pasid_state->mn, pasid_state->mm);

	ret = set_pasid_state(dev_state, pasid_state, pasid);
	if (ret)
		goto out_unregister;

	ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
					__pa(pasid_state->mm->pgd));
	if (ret)
		goto out_clear_state;

	link_pasid_state(pasid_state);

	return 0;

out_clear_state:
	clear_pasid_state(dev_state, pasid);

out_unregister:
	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

out_free:
	free_pasid_state(pasid_state);

out:
	put_device_state(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);

void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);
	dev_state = get_device_state(devid);
	if (dev_state == NULL)
		return;

	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	pasid_state = get_pasid_state(dev_state, pasid);
	if (pasid_state == NULL)
		goto out;

	/*
	 * Drop reference taken here. We are safe because we still hold
	 * the reference taken in the amd_iommu_bind_pasid function.
	 */
	put_pasid_state(pasid_state);

	/* This will call the mn_release function and unbind the PASID */
	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

out:
	put_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_unbind_pasid);

int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
	struct device_state *dev_state;
	unsigned long flags;
	int ret, tmp;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	if (pasids <= 0 || pasids > (PASID_MASK + 1))
		return -EINVAL;

	devid = device_id(pdev);

	dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
	if (dev_state == NULL)
		return -ENOMEM;

	spin_lock_init(&dev_state->lock);
	init_waitqueue_head(&dev_state->wq);
	dev_state->pdev = pdev;
	dev_state->devid = devid;

	tmp = pasids;
	for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
		dev_state->pasid_levels += 1;

	atomic_set(&dev_state->count, 1);
	dev_state->max_pasids = pasids;

	ret = -ENOMEM;
	dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
	if (dev_state->states == NULL)
		goto out_free_dev_state;

	dev_state->domain = iommu_domain_alloc(&pci_bus_type);
	if (dev_state->domain == NULL)
		goto out_free_states;

	amd_iommu_domain_direct_map(dev_state->domain);

	ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
	if (ret)
		goto out_free_domain;

	ret = iommu_attach_device(dev_state->domain, &pdev->dev);
	if (ret != 0)
		goto out_free_domain;

	spin_lock_irqsave(&state_lock, flags);

	if (__get_device_state(devid) != NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		ret = -EBUSY;
		goto out_free_domain;
	}

	list_add_tail(&dev_state->list, &state_list);

	spin_unlock_irqrestore(&state_lock, flags);

	return 0;

out_free_domain:
	iommu_domain_free(dev_state->domain);

out_free_states:
	free_page((unsigned long)dev_state->states);

out_free_dev_state:
	kfree(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_init_device);

void amd_iommu_free_device(struct pci_dev *pdev)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	dev_state = __get_device_state(devid);
	if (dev_state == NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		return;
	}

	list_del(&dev_state->list);

	spin_unlock_irqrestore(&state_lock, flags);

	/* Get rid of any remaining pasid states */
	free_pasid_states(dev_state);

	put_device_state_wait(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);

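/*
 * Illustrative only - a minimal sketch of how a driver for a
 * PRI/PASID-capable device might use the four exported entry points
 * above. Error handling is elided; 'pasid_limit' and 'my_pasid' are
 * assumed to come from the caller:
 *
 *	ret = amd_iommu_init_device(pdev, pasid_limit);
 *	...
 *	ret = amd_iommu_bind_pasid(pdev, my_pasid, current);
 *	... device issues DMA tagged with my_pasid ...
 *	amd_iommu_unbind_pasid(pdev, my_pasid);
 *	amd_iommu_free_device(pdev);
 */
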
int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
				 amd_iommu_invalid_ppr_cb cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ppr_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);

int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
				    amd_iommu_invalidate_ctx cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ctx_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);

static int __init amd_iommu_v2_init(void)
{
	int ret;

	pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n");

	if (!amd_iommu_v2_supported()) {
		pr_info("AMD IOMMUv2 functionality not available on this system\n");
		/*
		 * Load anyway to provide the symbols to other modules
		 * which may use AMD IOMMUv2 optionally.
		 */
		return 0;
	}

	spin_lock_init(&state_lock);

	ret = -ENOMEM;
	iommu_wq = create_workqueue("amd_iommu_v2");
	if (iommu_wq == NULL)
		goto out;

	ret = -ENOMEM;
	empty_page_table = (u64 *)get_zeroed_page(GFP_KERNEL);
	if (empty_page_table == NULL)
		goto out_destroy_wq;

	amd_iommu_register_ppr_notifier(&ppr_nb);

	return 0;

out_destroy_wq:
	destroy_workqueue(iommu_wq);

out:
	return ret;
}

static void __exit amd_iommu_v2_exit(void)
{
	struct device_state *dev_state;
	int i;

	if (!amd_iommu_v2_supported())
		return;

	amd_iommu_unregister_ppr_notifier(&ppr_nb);

	flush_workqueue(iommu_wq);

	/*
	 * The loop below might call flush_workqueue(), so call
	 * destroy_workqueue() after it
	 */
	for (i = 0; i < MAX_DEVICES; ++i) {
		dev_state = get_device_state(i);

		if (dev_state == NULL)
			continue;

		WARN_ON_ONCE(1);

		put_device_state(dev_state);
		amd_iommu_free_device(dev_state->pdev);
	}

	destroy_workqueue(iommu_wq);

	free_page((unsigned long)empty_page_table);
}

module_init(amd_iommu_v2_init);
module_exit(amd_iommu_v2_exit);