/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/sysdev.h>
#include <asm/pci-direct.h>
#include <asm/amd_iommu_types.h>
#include <asm/amd_iommu.h>
#include <asm/gart.h>

/*
 * definitions for the ACPI scanning code
 */
#define UPDATE_LAST_BDF(x) do {\
        if ((x) > amd_iommu_last_bdf) \
                amd_iommu_last_bdf = (x); \
        } while (0)

#define DEVID(bus, devfn) (((bus) << 8) | (devfn))
#define PCI_BUS(x) (((x) >> 8) & 0xff)
#define IVRS_HEADER_LENGTH 48

/* device ids run from 0 to amd_iommu_last_bdf inclusive, hence the +1 */
#define TBL_SIZE(x) (1 << (PAGE_SHIFT + \
                        get_order((amd_iommu_last_bdf + 1) * (x))))

#define ACPI_IVHD_TYPE                  0x10
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47

/* bit masks in the flags field of the IVHD header */
#define IVHD_FLAG_HT_TUN_EN             0x01
#define IVHD_FLAG_PASSPW_EN             0x02
#define IVHD_FLAG_RESPASSPW_EN          0x04
#define IVHD_FLAG_ISOC_EN               0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000

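/*
 * The structures below mirror the on-disk layout of the IVHD block, the
 * IVHD device entries and the IVMD block in the ACPI IVRS table (hence
 * the packed attribute); field names follow the AMD IOMMU specification
 */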
struct ivhd_header {
        u8 type;
        u8 flags;
        u16 length;
        u16 devid;
        u16 cap_ptr;
        u64 mmio_phys;
        u16 pci_seg;
        u16 info;
        u32 reserved;
} __attribute__((packed));

struct ivhd_entry {
        u8 type;
        u16 devid;
        u8 flags;
        u32 ext;
} __attribute__((packed));

struct ivmd_header {
        u8 type;
        u8 flags;
        u16 length;
        u16 devid;
        u16 aux;
        u64 resv;
        u64 range_start;
        u64 range_length;
} __attribute__((packed));

static int __initdata amd_iommu_detected;

u16 amd_iommu_last_bdf;
struct list_head amd_iommu_unity_map;
unsigned amd_iommu_aperture_order = 26;
int amd_iommu_isolate;

struct list_head amd_iommu_list;
struct dev_table_entry *amd_iommu_dev_table;
u16 *amd_iommu_alias_table;
struct amd_iommu **amd_iommu_rlookup_table;
struct protection_domain **amd_iommu_pd_table;
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;
static u32 alias_table_size;
static u32 rlookup_table_size;

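/*
 * Write base and limit of the hardware exclusion range (DMA to this
 * range bypasses address translation) into the MMIO registers of an
 * IOMMU; a zero start address means no exclusion range is configured
 */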
static void __init iommu_set_exclusion_range(struct amd_iommu *iommu)
{
        u64 start = iommu->exclusion_start & PAGE_MASK;
        u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
        u64 entry;

        if (!iommu->exclusion_start)
                return;

        entry = start | MMIO_EXCL_ENABLE_MASK;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
                        &entry, sizeof(entry));

        entry = limit;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
                        &entry, sizeof(entry));
}

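/*
 * Program address and size of the system-wide device table into this
 * IOMMU's Device Table Base Address register; the low bits encode the
 * table size in units of 4K pages, minus one
 */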
static void __init iommu_set_device_table(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->mmio_base == NULL);

        entry = virt_to_phys(amd_iommu_dev_table);
        entry |= (dev_table_size >> 12) - 1;
        memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
                        &entry, sizeof(entry));
}

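/* Generic helpers to set or clear one bit in the IOMMU control register */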
static void __init iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
        u32 ctrl;

        ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl |= (1 << bit);
        writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
        u32 ctrl;

        ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl &= ~(1 << bit);
        writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

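/*
 * Switch the IOMMU on; the trailing read of the control register is
 * presumably there to flush the posted MMIO write
 */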
void __init iommu_enable(struct amd_iommu *iommu)
{
        u32 ctrl;

        printk(KERN_INFO "AMD IOMMU: Enabling IOMMU at ");
        print_devid(iommu->devid, 0);
        printk(" cap 0x%hx\n", iommu->cap_ptr);

        iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
        ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

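/*
 * Mapping and unmapping functions for the IOMMU MMIO space; the region
 * is reserved first so nothing else can claim it
 */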
static u8 * __init iommu_map_mmio_space(u64 address)
{
        u8 *ret;

        if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu"))
                return NULL;

        ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
        if (ret != NULL)
                return ret;

        release_mem_region(address, MMIO_REGION_LENGTH);

        return NULL;
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
        if (iommu->mmio_base)
                iounmap(iommu->mmio_base);
        release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
}

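/*
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we find out the highest device id the driver
 * has to handle, so the size of the shared data structures can be
 * calculated before they are allocated
 */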
static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
{
        u32 cap;

        cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
        UPDATE_LAST_BDF(DEVID(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));

        return 0;
}

static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
        u8 *p = (void *)h, *end = (void *)h;
        struct ivhd_entry *dev;

        p += sizeof(*h);
        end += h->length;

        find_last_devid_on_pci(PCI_BUS(h->devid),
                        PCI_SLOT(h->devid),
                        PCI_FUNC(h->devid),
                        h->cap_ptr);

        while (p < end) {
                dev = (struct ivhd_entry *)p;
                switch (dev->type) {
                case IVHD_DEV_SELECT:
                case IVHD_DEV_RANGE_END:
                case IVHD_DEV_ALIAS:
                case IVHD_DEV_EXT_SELECT:
                        UPDATE_LAST_BDF(dev->devid);
                        break;
                default:
                        break;
                }
                /* the upper two bits of the type encode the entry length */
                p += 0x04 << (*p >> 6);
        }

        WARN_ON(p != end);

        return 0;
}

static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
        int i;
        u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
        struct ivhd_header *h;

        /*
         * Validate checksum here so we don't need to do it when
         * we actually parse the table
         */
        for (i = 0; i < table->length; ++i)
                checksum += p[i];
        if (checksum != 0)
                /* ACPI table corrupt */
                return -ENODEV;

        p += IVRS_HEADER_LENGTH;

        end += table->length;
        while (p < end) {
                h = (struct ivhd_header *)p;
                switch (h->type) {
                case ACPI_IVHD_TYPE:
                        find_last_devid_from_ivhd(h);
                        break;
                default:
                        break;
                }
                p += h->length;
        }
        WARN_ON(p != end);

        return 0;
}

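/*
 * Allocate the command buffer this IOMMU fetches commands from, program
 * its address and size into the MMIO registers and enable command
 * processing in the hardware
 */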
static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
{
        u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL,
                        get_order(CMD_BUFFER_SIZE));
        u64 entry = 0;

        if (cmd_buf == NULL)
                return NULL;

        iommu->cmd_buf_size = CMD_BUFFER_SIZE;

        memset(cmd_buf, 0, CMD_BUFFER_SIZE);

        entry = (u64)virt_to_phys(cmd_buf);
        entry |= MMIO_CMD_SIZE_512;
        memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
                        &entry, sizeof(entry));

        iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);

        return cmd_buf;
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
        if (iommu->cmd_buf)
                free_pages((unsigned long)iommu->cmd_buf,
                                get_order(CMD_BUFFER_SIZE));
}

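/*
 * A device table entry is kept as eight 32-bit words; select the right
 * word from the bit number and set the requested bit in it
 */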
static void set_dev_entry_bit(u16 devid, u8 bit)
{
        int i = (bit >> 5) & 0x07;
        int _bit = bit & 0x1f;

        amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
}

static void __init set_dev_entry_from_acpi(u16 devid, u32 flags, u32 ext_flags)
{
        if (flags & ACPI_DEVFLAG_INITPASS)
                set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
        if (flags & ACPI_DEVFLAG_EXTINT)
                set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
        if (flags & ACPI_DEVFLAG_NMI)
                set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
        if (flags & ACPI_DEVFLAG_SYSMGT1)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
        if (flags & ACPI_DEVFLAG_SYSMGT2)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
        if (flags & ACPI_DEVFLAG_LINT0)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
        if (flags & ACPI_DEVFLAG_LINT1)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
}

static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
        amd_iommu_rlookup_table[devid] = iommu;
}

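/*
 * Mark a device as covered by the hardware exclusion range and remember
 * start and length of the range in the IOMMU responsible for it
 */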
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
        struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

        if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
                return;

        if (iommu) {
                set_dev_entry_bit(devid, DEV_ENTRY_EX);
                iommu->exclusion_start = m->range_start;
                iommu->exclusion_length = m->range_length;
        }
}

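/*
 * Read the capability header and the device range this IOMMU translates
 * directly out of PCI config space; first_device and last_device bound
 * the device ids handled by this IOMMU
 */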
static void __init init_iommu_from_pci(struct amd_iommu *iommu)
{
        int bus = PCI_BUS(iommu->devid);
        int dev = PCI_SLOT(iommu->devid);
        int fn  = PCI_FUNC(iommu->devid);
        int cap_ptr = iommu->cap_ptr;
        u32 range;

        iommu->cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_CAP_HDR_OFFSET);

        range = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
        iommu->first_device = DEVID(MMIO_GET_BUS(range), MMIO_GET_FD(range));
        iommu->last_device = DEVID(MMIO_GET_BUS(range), MMIO_GET_LD(range));
}

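/*
 * Take the recommended feature flags and the device entries from an
 * IVHD block and initialize the hardware and the device table with
 * them; range entries are collected between their START and RANGE_END
 * markers and applied when the end marker is seen
 */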
static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
                                        struct ivhd_header *h)
{
        u8 *p = (u8 *)h;
        u8 *end = p, flags = 0;
        u16 dev_i, devid = 0, devid_start = 0, devid_to = 0;
        u32 ext_flags = 0;
        bool alias = false;
        struct ivhd_entry *e;

        /*
         * First set the recommended feature enable bits from ACPI
         * into the IOMMU control registers
         */
        h->flags & IVHD_FLAG_HT_TUN_EN ?
                iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
                iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

        h->flags & IVHD_FLAG_PASSPW_EN ?
                iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

        h->flags & IVHD_FLAG_RESPASSPW_EN ?
                iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

        h->flags & IVHD_FLAG_ISOC_EN ?
                iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
                iommu_feature_disable(iommu, CONTROL_ISOC_EN);

        /*
         * make IOMMU memory accesses cache coherent
         */
        iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

        /*
         * Done. Now parse the device entries
         */
        p += sizeof(struct ivhd_header);
        end += h->length;

        while (p < end) {
                e = (struct ivhd_entry *)p;
                switch (e->type) {
                case IVHD_DEV_ALL:
                        for (dev_i = iommu->first_device;
                                        dev_i <= iommu->last_device; ++dev_i)
                                set_dev_entry_from_acpi(dev_i, e->flags, 0);
                        break;
                case IVHD_DEV_SELECT:
                        devid = e->devid;
                        set_dev_entry_from_acpi(devid, e->flags, 0);
                        break;
                case IVHD_DEV_SELECT_RANGE_START:
                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = 0;
                        alias = false;
                        break;
                case IVHD_DEV_ALIAS:
                        devid = e->devid;
                        devid_to = e->ext >> 8;
                        set_dev_entry_from_acpi(devid, e->flags, 0);
                        amd_iommu_alias_table[devid] = devid_to;
                        break;
                case IVHD_DEV_ALIAS_RANGE:
                        devid_start = e->devid;
                        flags = e->flags;
                        devid_to = e->ext >> 8;
                        ext_flags = 0;
                        alias = true;
                        break;
                case IVHD_DEV_EXT_SELECT:
                        devid = e->devid;
                        set_dev_entry_from_acpi(devid, e->flags, e->ext);
                        break;
                case IVHD_DEV_EXT_SELECT_RANGE:
                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = e->ext;
                        alias = false;
                        break;
                case IVHD_DEV_RANGE_END:
                        devid = e->devid;
                        for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
                                if (alias)
                                        amd_iommu_alias_table[dev_i] = devid_to;
                                set_dev_entry_from_acpi(
                                                amd_iommu_alias_table[dev_i],
                                                flags, ext_flags);
                        }
                        break;
                default:
                        break;
                }

                /* the upper two bits of the type encode the entry length */
                p += 0x04 << (e->type >> 6);
        }
}

static int __init init_iommu_devices(struct amd_iommu *iommu)
{
        u16 i;

        for (i = iommu->first_device; i <= iommu->last_device; ++i)
                set_iommu_for_device(iommu, i);

        return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
        free_command_buffer(iommu);
        iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
        struct amd_iommu *iommu, *next;

        list_for_each_entry_safe(iommu, next, &amd_iommu_list, list) {
                list_del(&iommu->list);
                free_iommu_one(iommu);
                kfree(iommu);
        }
}

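/*
 * Initialize one IOMMU: map its MMIO space, program the device table
 * pointer, allocate its command buffer and read the remaining
 * configuration out of PCI config space and the IVHD block
 */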
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
        spin_lock_init(&iommu->lock);
        list_add_tail(&iommu->list, &amd_iommu_list);

        /*
         * Copy data from ACPI table entry to the iommu struct
         */
        iommu->devid = h->devid;
        iommu->cap_ptr = h->cap_ptr;
        iommu->mmio_phys = h->mmio_phys;
        iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
        if (!iommu->mmio_base)
                return -ENOMEM;

        iommu_set_device_table(iommu);
        iommu->cmd_buf = alloc_command_buffer(iommu);
        if (!iommu->cmd_buf)
                return -ENOMEM;

        init_iommu_from_pci(iommu);
        init_iommu_from_acpi(iommu, h);
        init_iommu_devices(iommu);

        return 0;
}

static int __init init_iommu_all(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivhd_header *h;
        struct amd_iommu *iommu;
        int ret;

        INIT_LIST_HEAD(&amd_iommu_list);

        end += table->length;
        p += IVRS_HEADER_LENGTH;

        while (p < end) {
                h = (struct ivhd_header *)p;
                switch (h->type) {
                case ACPI_IVHD_TYPE:
                        iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
                        if (iommu == NULL)
                                return -ENOMEM;
                        ret = init_iommu_one(iommu, h);
                        if (ret)
                                return ret;
                        break;
                default:
                        break;
                }
                p += h->length;
        }
        WARN_ON(p != end);

        return 0;
}

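/*
 * The functions below handle the IVMD entries in the IVRS table; they
 * describe memory ranges that either bypass translation entirely
 * (exclusion ranges) or require a one-to-one (unity) mapping
 */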
static void __init free_unity_maps(void)
{
        struct unity_map_entry *entry, *next;

        list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
                list_del(&entry->list);
                kfree(entry);
        }
}

static int __init init_exclusion_range(struct ivmd_header *m)
{
        int i;

        switch (m->type) {
        case ACPI_IVMD_TYPE:
                set_device_exclusion_range(m->devid, m);
                break;
        case ACPI_IVMD_TYPE_ALL:
                for (i = 0; i <= amd_iommu_last_bdf; ++i)
                        set_device_exclusion_range(i, m);
                break;
        case ACPI_IVMD_TYPE_RANGE:
                for (i = m->devid; i <= m->aux; ++i)
                        set_device_exclusion_range(i, m);
                break;
        default:
                break;
        }

        return 0;
}

static int __init init_unity_map_range(struct ivmd_header *m)
{
        struct unity_map_entry *e;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        switch (m->type) {
        default:
        case ACPI_IVMD_TYPE:
                e->devid_start = e->devid_end = m->devid;
                break;
        case ACPI_IVMD_TYPE_ALL:
                e->devid_start = 0;
                e->devid_end = amd_iommu_last_bdf;
                break;
        case ACPI_IVMD_TYPE_RANGE:
                e->devid_start = m->devid;
                e->devid_end = m->aux;
                break;
        }
        e->address_start = PAGE_ALIGN(m->range_start);
        e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
        e->prot = m->flags >> 1;

        list_add_tail(&e->list, &amd_iommu_unity_map);

        return 0;
}

static int __init init_memory_definitions(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivmd_header *m;

        INIT_LIST_HEAD(&amd_iommu_unity_map);

        end += table->length;
        p += IVRS_HEADER_LENGTH;

        while (p < end) {
                m = (struct ivmd_header *)p;
                if (m->flags & IVMD_FLAG_EXCL_RANGE)
                        init_exclusion_range(m);
                else if (m->flags & IVMD_FLAG_UNITY_MAP)
                        init_unity_map_range(m);

                p += m->length;
        }

        return 0;
}

static void __init enable_iommus(void)
{
        struct amd_iommu *iommu;

        list_for_each_entry(iommu, &amd_iommu_list, list) {
                iommu_set_exclusion_range(iommu);
                iommu_enable(iommu);
        }
}

/*
 * Suspend/Resume support
 * disable suspend until a real resume implementation is available
 */

static int amd_iommu_resume(struct sys_device *dev)
{
        return 0;
}

static int amd_iommu_suspend(struct sys_device *dev, pm_message_t state)
{
        return -EINVAL;
}

static struct sysdev_class amd_iommu_sysdev_class = {
        .name = "amd_iommu",
        .suspend = amd_iommu_suspend,
        .resume = amd_iommu_resume,
};

static struct sys_device device_amd_iommu = {
        .id = 0,
        .cls = &amd_iommu_sysdev_class,
};

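/*
 * Central initialization function for the AMD IOMMU driver, called
 * after the hardware has been detected: allocate the shared tables,
 * parse the IVRS table, register the sysdev class and finally enable
 * all IOMMUs found in the system
 */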
int __init amd_iommu_init(void)
{
        int i, ret = 0;

        if (no_iommu) {
                printk(KERN_INFO "AMD IOMMU disabled by kernel command line\n");
                return 0;
        }

        if (!amd_iommu_detected)
                return -ENODEV;

        /*
         * First parse ACPI tables to find the largest Bus/Dev/Func
         * we need to handle. Based on this information the shared data
         * structures for the IOMMUs in the system will be allocated
         */
        if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
                return -ENODEV;

        dev_table_size     = TBL_SIZE(DEV_TABLE_ENTRY_SIZE);
        alias_table_size   = TBL_SIZE(ALIAS_TABLE_ENTRY_SIZE);
        rlookup_table_size = TBL_SIZE(RLOOKUP_TABLE_ENTRY_SIZE);

        ret = -ENOMEM;

        /* Device table - directly used by all IOMMUs */
        amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL,
                        get_order(dev_table_size));
        if (amd_iommu_dev_table == NULL)
                goto out;

        /*
         * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
         * IOMMUs see for that device
         */
        amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
                        get_order(alias_table_size));
        if (amd_iommu_alias_table == NULL)
                goto free;

        /* IOMMU rlookup table - find the IOMMU for a specific device */
        amd_iommu_rlookup_table = (void *)__get_free_pages(GFP_KERNEL,
                        get_order(rlookup_table_size));
        if (amd_iommu_rlookup_table == NULL)
                goto free;

        /*
         * Protection Domain table - maps devices to protection domains
         * This table has the same size as the rlookup_table
         */
        amd_iommu_pd_table = (void *)__get_free_pages(GFP_KERNEL,
                        get_order(rlookup_table_size));
        if (amd_iommu_pd_table == NULL)
                goto free;

        amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(GFP_KERNEL,
                        get_order(MAX_DOMAIN_ID/8));
        if (amd_iommu_pd_alloc_bitmap == NULL)
                goto free;

        /*
         * memory is allocated now; initialize the device table with all
         * zeroes and let all alias entries point to themselves
         */
        memset(amd_iommu_dev_table, 0, dev_table_size);
        for (i = 0; i <= amd_iommu_last_bdf; ++i)
                amd_iommu_alias_table[i] = i;

        memset(amd_iommu_pd_table, 0, rlookup_table_size);
        memset(amd_iommu_pd_alloc_bitmap, 0, MAX_DOMAIN_ID / 8);

        /*
         * never allocate domain 0 because it's used as the non-allocated and
         * error value placeholder
         */
        amd_iommu_pd_alloc_bitmap[0] = 1;

        /*
         * now the data structures are allocated and basically initialized;
         * start the real acpi table scan
         */
        ret = -ENODEV;
        if (acpi_table_parse("IVRS", init_iommu_all) != 0)
                goto free;

        if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
                goto free;

        ret = amd_iommu_init_dma_ops();
        if (ret)
                goto free;

        ret = sysdev_class_register(&amd_iommu_sysdev_class);
        if (ret)
                goto free;

        ret = sysdev_register(&device_amd_iommu);
        if (ret)
                goto free;

        enable_iommus();

        printk(KERN_INFO "AMD IOMMU: aperture size is %d MB\n",
                        (1 << (amd_iommu_aperture_order-20)));

        printk(KERN_INFO "AMD IOMMU: device isolation ");
        if (amd_iommu_isolate)
                printk("enabled\n");
        else
                printk("disabled\n");

out:
        return ret;

free:
        if (amd_iommu_pd_alloc_bitmap)
                free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
                                get_order(MAX_DOMAIN_ID/8));

        if (amd_iommu_pd_table)
                free_pages((unsigned long)amd_iommu_pd_table,
                                get_order(rlookup_table_size));

        if (amd_iommu_rlookup_table)
                free_pages((unsigned long)amd_iommu_rlookup_table,
                                get_order(rlookup_table_size));

        if (amd_iommu_alias_table)
                free_pages((unsigned long)amd_iommu_alias_table,
                                get_order(alias_table_size));

        if (amd_iommu_dev_table)
                free_pages((unsigned long)amd_iommu_dev_table,
                                get_order(dev_table_size));

        free_iommu_all();

        free_unity_maps();

        goto out;
}

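/*
 * Early detection code, run before the DMA subsystem picks an IOMMU
 * implementation; the presence of an IVRS table is taken as evidence
 * that an AMD IOMMU exists, which also disables the GART aperture code
 */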
static int __init early_amd_iommu_detect(struct acpi_table_header *table)
{
        return 0;
}

void __init amd_iommu_detect(void)
{
        if (swiotlb || no_iommu || iommu_detected)
                return;

        if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
                iommu_detected = 1;
                amd_iommu_detected = 1;
#ifdef CONFIG_GART_IOMMU
                gart_iommu_aperture_disabled = 1;
                gart_iommu_aperture = 0;
#endif
        }
}

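/*
 * Parsers for the "amd_iommu=" and "amd_iommu_size=" kernel command
 * line options; the size option selects the aperture order as a power
 * of two (e.g. "64M" means order 26)
 */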
static int __init parse_amd_iommu_options(char *str)
{
        for (; *str; ++str) {
                if (strcmp(str, "isolate") == 0)
                        amd_iommu_isolate = 1;
        }

        return 1;
}

static int __init parse_amd_iommu_size_options(char *str)
{
        for (; *str; ++str) {
                if (strcmp(str, "32M") == 0)
                        amd_iommu_aperture_order = 25;
                if (strcmp(str, "64M") == 0)
                        amd_iommu_aperture_order = 26;
                if (strcmp(str, "128M") == 0)
                        amd_iommu_aperture_order = 27;
                if (strcmp(str, "256M") == 0)
                        amd_iommu_aperture_order = 28;
                if (strcmp(str, "512M") == 0)
                        amd_iommu_aperture_order = 29;
                if (strcmp(str, "1G") == 0)
                        amd_iommu_aperture_order = 30;
        }

        return 1;
}

__setup("amd_iommu=", parse_amd_iommu_options);
__setup("amd_iommu_size=", parse_amd_iommu_size_options);