/*
 * Volume Management Device driver
 * Copyright (c) 2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

#include <asm/irqdomain.h>
#include <asm/device.h>
#include <asm/msi.h>
#include <asm/msidef.h>

#define VMD_CFGBAR	0
#define VMD_MEMBAR1	2
#define VMD_MEMBAR2	4

/*
 * Lock for manipulating VMD IRQ lists.
 */
static DEFINE_RAW_SPINLOCK(list_lock);

/**
 * struct vmd_irq - private data to map driver IRQ to the VMD shared vector
 * @node:	list item for parent traversal.
 * @rcu:	RCU callback item for freeing.
 * @irq:	back pointer to the parent vmd_irq_list this entry belongs to.
 * @virq:	the virtual IRQ value provided to the requesting driver.
 *
 * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
 * a VMD IRQ using this structure.
 */
struct vmd_irq {
	struct list_head	node;
	struct rcu_head		rcu;
	struct vmd_irq_list	*irq;
	unsigned int		virq;
};

/**
 * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector
 * @irq_list:	the list of child IRQs that the VMD vector demuxes to.
 * @vmd:	back pointer to the VMD device owning this vector.
 * @vmd_vector:	the h/w IRQ assigned to the VMD.
 * @index:	index into the VMD MSI-X table; used for message routing.
 * @count:	number of child IRQs assigned to this vector; used to track
 *		sharing.
 */
struct vmd_irq_list {
	struct list_head	irq_list;
	struct vmd_dev		*vmd;
	unsigned int		vmd_vector;
	unsigned int		index;
	unsigned int		count;
};

struct vmd_dev {
	struct pci_dev		*dev;

	spinlock_t		cfg_lock;
	char __iomem		*cfgbar;

	int			msix_count;
	struct msix_entry	*msix_entries;
	struct vmd_irq_list	*irqs;

	struct pci_sysdata	sysdata;
	struct resource		resources[3];
	struct irq_domain	*irq_domain;
	struct pci_bus		*bus;

#ifdef CONFIG_X86_DEV_DMA_OPS
	struct dma_map_ops	dma_ops;
	struct dma_domain	dma_domain;
#endif
};

static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
{
	return container_of(bus->sysdata, struct vmd_dev, sysdata);
}

/*
 * Drivers managing a device in a VMD domain allocate their own IRQs as before,
 * but the MSI entry for the hardware it's driving will be programmed with a
 * destination ID for the VMD MSI-X table.  The VMD muxes interrupts in its
 * domain into one of its own, and the VMD driver de-muxes these for the
 * handlers sharing that VMD IRQ.  The vmd irq_domain provides the operations
 * and irq_chip to set this up.
 */
static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct vmd_irq *vmdirq = data->chip_data;
	struct vmd_irq_list *irq = vmdirq->irq;

	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_DEST_ID(irq->index);
	msg->data = 0;
}
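
/*
 * For example, assuming the usual x86 MSI address layout from <asm/msidef.h>
 * (destination ID field starting at bit 12 of the low address word), a child
 * IRQ assigned VMD MSI-X slot 3 is programmed with
 * address_lo = 0xfee00000 | (3 << 12) = 0xfee03000, so the device's MSI
 * writes are steered to the VMD's own vector 3 rather than directly to a CPU.
 */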

/*
 * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops.
 */
static void vmd_irq_enable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;

	raw_spin_lock(&list_lock);
	list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
	raw_spin_unlock(&list_lock);

	data->chip->irq_unmask(data);
}

static void vmd_irq_disable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;

	data->chip->irq_mask(data);

	raw_spin_lock(&list_lock);
	list_del_rcu(&vmdirq->node);
	raw_spin_unlock(&list_lock);
}

/*
 * XXX: Stubbed until we develop an acceptable way to avoid conflicts with
 * other devices sharing the same vector.
 */
static int vmd_irq_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	return -EINVAL;
}

static struct irq_chip vmd_msi_controller = {
	.name			= "VMD-MSI",
	.irq_enable		= vmd_irq_enable,
	.irq_disable		= vmd_irq_disable,
	.irq_compose_msi_msg	= vmd_compose_msi_msg,
	.irq_set_affinity	= vmd_irq_set_affinity,
};

static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
				     msi_alloc_info_t *arg)
{
	return 0;
}

/*
 * XXX: We can be even smarter selecting the best IRQ once we solve the
 * affinity problem.
 */
static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd)
{
	int i, best = 0;

	raw_spin_lock(&list_lock);
	for (i = 1; i < vmd->msix_count; i++)
		if (vmd->irqs[i].count < vmd->irqs[best].count)
			best = i;
	vmd->irqs[best].count++;
	raw_spin_unlock(&list_lock);

	return &vmd->irqs[best];
}
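
/*
 * For example, with per-vector child counts of {2, 1, 1, 3}, the scan above
 * leaves 'best' at vector 1: every vector is eligible (index 0 is simply the
 * starting candidate), and ties go to the lowest-numbered vector.
 */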

static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
			unsigned int virq, irq_hw_number_t hwirq,
			msi_alloc_info_t *arg)
{
	struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(arg->desc)->bus);
	struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);

	if (!vmdirq)
		return -ENOMEM;

	INIT_LIST_HEAD(&vmdirq->node);
	vmdirq->irq = vmd_next_irq(vmd);
	vmdirq->virq = virq;

	irq_domain_set_info(domain, virq, vmdirq->irq->vmd_vector, info->chip,
			    vmdirq, handle_simple_irq, vmd, NULL);
	return 0;
}

static void vmd_msi_free(struct irq_domain *domain,
			 struct msi_domain_info *info, unsigned int virq)
{
	struct vmd_irq *vmdirq = irq_get_chip_data(virq);

	/* XXX: Potential optimization to rebalance */
	raw_spin_lock(&list_lock);
	vmdirq->irq->count--;
	raw_spin_unlock(&list_lock);

	kfree_rcu(vmdirq, rcu);
}

static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
			   int nvec, msi_alloc_info_t *arg)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);

	if (nvec > vmd->msix_count)
		return vmd->msix_count;

	memset(arg, 0, sizeof(*arg));
	return 0;
}
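
/*
 * A positive return from ->msi_prepare() is treated by the PCI/MSI core as
 * "retry with at most this many vectors", so a child device that requests
 * more vectors than VMD has MSI-X entries gets clamped to vmd->msix_count
 * rather than failing outright.
 */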

static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
	arg->desc = desc;
}

static struct msi_domain_ops vmd_msi_domain_ops = {
	.get_hwirq	= vmd_get_hwirq,
	.msi_init	= vmd_msi_init,
	.msi_free	= vmd_msi_free,
	.msi_prepare	= vmd_msi_prepare,
	.set_desc	= vmd_set_desc,
};

static struct msi_domain_info vmd_msi_domain_info = {
	.flags		= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
			  MSI_FLAG_PCI_MSIX,
	.ops		= &vmd_msi_domain_ops,
	.chip		= &vmd_msi_controller,
};

#ifdef CONFIG_X86_DEV_DMA_OPS
/*
 * VMD replaces the requester ID with its own.  DMA mappings for devices in a
 * VMD domain need to be mapped for the VMD, not the device requiring
 * the mapping.
 */
static struct device *to_vmd_dev(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);

	return &vmd->dev->dev;
}

static struct dma_map_ops *vmd_dma_ops(struct device *dev)
{
	return to_vmd_dev(dev)->archdata.dma_ops;
}

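/*
 * Each wrapper below resolves the child device to the VMD endpoint and
 * forwards the call, so e.g. a dma_map_page() issued for an NVMe device
 * behind VMD is performed against the VMD's own struct device, i.e. against
 * the requester ID the rest of the platform actually sees.
 */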
static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
		       gfp_t flag, struct dma_attrs *attrs)
{
	return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag,
				       attrs);
}

static void vmd_free(struct device *dev, size_t size, void *vaddr,
		     dma_addr_t addr, struct dma_attrs *attrs)
{
	return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr,
				      attrs);
}

static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t addr, size_t size,
		    struct dma_attrs *attrs)
{
	return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr,
				      size, attrs);
}

static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
			   void *cpu_addr, dma_addr_t addr, size_t size,
			   struct dma_attrs *attrs)
{
	return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr,
					     addr, size, attrs);
}

static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       struct dma_attrs *attrs)
{
	return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size,
					  dir, attrs);
}

static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
			   enum dma_data_direction dir, struct dma_attrs *attrs)
{
	vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs);
}

static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction dir, struct dma_attrs *attrs)
{
	return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
}

static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			 enum dma_data_direction dir, struct dma_attrs *attrs)
{
	vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
}

static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
				    size_t size, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
}

static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr,
				       size_t size, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_single_for_device(to_vmd_dev(dev), addr, size,
						 dir);
}

static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
}

static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
}

static int vmd_mapping_error(struct device *dev, dma_addr_t addr)
{
	return vmd_dma_ops(dev)->mapping_error(to_vmd_dev(dev), addr);
}

static int vmd_dma_supported(struct device *dev, u64 mask)
{
	return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask);
}

#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
static u64 vmd_get_required_mask(struct device *dev)
{
	return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev));
}
#endif

static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
{
	struct dma_domain *domain = &vmd->dma_domain;

	if (vmd->dev->dev.archdata.dma_ops)
		del_dma_domain(domain);
}

#define ASSIGN_VMD_DMA_OPS(source, dest, fn)	\
	do {					\
		if (source->fn)			\
			dest->fn = vmd_##fn;	\
	} while (0)

static void vmd_setup_dma_ops(struct vmd_dev *vmd)
{
	const struct dma_map_ops *source = vmd->dev->dev.archdata.dma_ops;
	struct dma_map_ops *dest = &vmd->dma_ops;
	struct dma_domain *domain = &vmd->dma_domain;

	domain->domain_nr = vmd->sysdata.domain;
	domain->dma_ops = dest;

	if (!source)
		return;
	ASSIGN_VMD_DMA_OPS(source, dest, alloc);
	ASSIGN_VMD_DMA_OPS(source, dest, free);
	ASSIGN_VMD_DMA_OPS(source, dest, mmap);
	ASSIGN_VMD_DMA_OPS(source, dest, get_sgtable);
	ASSIGN_VMD_DMA_OPS(source, dest, map_page);
	ASSIGN_VMD_DMA_OPS(source, dest, unmap_page);
	ASSIGN_VMD_DMA_OPS(source, dest, map_sg);
	ASSIGN_VMD_DMA_OPS(source, dest, unmap_sg);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_cpu);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
	ASSIGN_VMD_DMA_OPS(source, dest, mapping_error);
	ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
#endif
	add_dma_domain(domain);
}
#undef ASSIGN_VMD_DMA_OPS
#else
static void vmd_teardown_dma_ops(struct vmd_dev *vmd) {}
static void vmd_setup_dma_ops(struct vmd_dev *vmd) {}
#endif

static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
				  unsigned int devfn, int reg, int len)
{
	char __iomem *addr = vmd->cfgbar +
			     (bus->number << 20) + (devfn << 12) + reg;

	if ((addr - vmd->cfgbar) + len >=
	    resource_size(&vmd->dev->resource[VMD_CFGBAR]))
		return NULL;

	return addr;
}
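
/*
 * The layout above mirrors ECAM: 1MB of config space per bus, 4K per devfn.
 * For example, bus 1, devfn 0x10 (device 2, function 0), reg 0x04 resolves
 * to CFGBAR offset (1 << 20) + (0x10 << 12) + 0x04 = 0x110004.
 */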

/*
 * The CPU may deadlock if config space access is not serialized on some
 * versions of this hardware, so all config space access is done under a
 * spinlock.
 */
static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
			int len, u32 *value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		*value = readb(addr);
		break;
	case 2:
		*value = readw(addr);
		break;
	case 4:
		*value = readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

/*
 * VMD h/w converts non-posted config writes to posted memory writes.  The
 * read-back in this function forces the completion so it returns only after
 * the config space was written, as expected.
 */
static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
			 int len, u32 value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		writeb(value, addr);
		readb(addr);
		break;
	case 2:
		writew(value, addr);
		readw(addr);
		break;
	case 4:
		writel(value, addr);
		readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

static struct pci_ops vmd_ops = {
	.read		= vmd_pci_read,
	.write		= vmd_pci_write,
};

/*
 * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
 */
static int vmd_find_free_domain(void)
{
	int domain = 0xffff;
	struct pci_bus *bus = NULL;

	while ((bus = pci_find_next_bus(bus)) != NULL)
		domain = max_t(int, domain, pci_domain_nr(bus));
	return domain + 1;
}
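
/*
 * For example, on a machine whose only pre-existing host bridge is in ACPI
 * segment 0, the scan leaves 'domain' at 0xffff and the first VMD gets
 * domain 0x10000; a second VMD probed later would get 0x10001, and so on.
 */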

static int vmd_enable_domain(struct vmd_dev *vmd)
{
	struct pci_sysdata *sd = &vmd->sysdata;
	struct resource *res;
	u32 upper_bits;
	unsigned long flags;
	LIST_HEAD(resources);

	res = &vmd->dev->resource[VMD_CFGBAR];
	vmd->resources[0] = (struct resource) {
		.name  = "VMD CFGBAR",
		.start = res->start,
		.end   = (resource_size(res) >> 20) - 1,
		.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
	};
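
	/*
	 * One bus takes 1MB of CFGBAR (256 devfns x 4K of config space), so
	 * the .end above encodes the number of buses this CFGBAR can reach;
	 * e.g. a 32MB CFGBAR covers buses 0x00-0x1f of the VMD domain.
	 */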

	/*
	 * If the window is below 4GB, clear IORESOURCE_MEM_64 so we can
	 * put 32-bit resources in the window.
	 *
	 * There's no hardware reason why a 64-bit window *couldn't*
	 * contain a 32-bit resource, but pbus_size_mem() computes the
	 * bridge window size assuming a 64-bit window will contain no
	 * 32-bit resources.  __pci_assign_resource() enforces that
	 * artificial restriction to make sure everything will fit.
	 *
	 * The only way we could use a 64-bit non-prefetchable MEMBAR is
	 * if its address is <4GB so that we can convert it to a 32-bit
	 * resource.  To be visible to the host OS, all VMD endpoints must
	 * be initially configured by platform BIOS, which includes setting
	 * up these resources.  We can assume the device is configured
	 * according to the platform needs.
	 */
	res = &vmd->dev->resource[VMD_MEMBAR1];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[1] = (struct resource) {
		.name  = "VMD MEMBAR1",
		.start = res->start,
		.end   = res->end,
		.flags = flags,
	};

	res = &vmd->dev->resource[VMD_MEMBAR2];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[2] = (struct resource) {
		.name  = "VMD MEMBAR2",
		.start = res->start + 0x2000,
		.end   = res->end,
		.flags = flags,
	};

	sd->domain = vmd_find_free_domain();
	if (sd->domain < 0)
		return sd->domain;

	sd->node = pcibus_to_node(vmd->dev->bus);

	vmd->irq_domain = pci_msi_create_irq_domain(NULL, &vmd_msi_domain_info,
						    NULL);
	if (!vmd->irq_domain)
		return -ENODEV;

	pci_add_resource(&resources, &vmd->resources[0]);
	pci_add_resource(&resources, &vmd->resources[1]);
	pci_add_resource(&resources, &vmd->resources[2]);
	vmd->bus = pci_create_root_bus(&vmd->dev->dev, 0, &vmd_ops, sd,
				       &resources);
	if (!vmd->bus) {
		pci_free_resource_list(&resources);
		irq_domain_remove(vmd->irq_domain);
		return -ENODEV;
	}

	vmd_setup_dma_ops(vmd);
	dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
	pci_rescan_bus(vmd->bus);

	WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
			       "domain"), "Can't create symlink to domain\n");
	return 0;
}

static irqreturn_t vmd_irq(int irq, void *data)
{
	struct vmd_irq_list *irqs = data;
	struct vmd_irq *vmdirq;

	rcu_read_lock();
	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
		generic_handle_irq(vmdirq->virq);
	rcu_read_unlock();

	return IRQ_HANDLED;
}
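
/*
 * Note: this is a broadcast demux.  Every child IRQ currently hooked to this
 * VMD vector has its handler invoked, so a child's handler can run even when
 * its device did not raise the interrupt, much like an ordinary shared IRQ.
 */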

static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct vmd_dev *vmd;
	int i, err;

	if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
		return -ENOMEM;

	vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL);
	if (!vmd)
		return -ENOMEM;

	vmd->dev = dev;
	err = pcim_enable_device(dev);
	if (err < 0)
		return err;

	vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0);
	if (!vmd->cfgbar)
		return -ENOMEM;

	pci_set_master(dev);
	if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)))
		return -ENODEV;

	vmd->msix_count = pci_msix_vec_count(dev);
	if (vmd->msix_count < 0)
		return -ENODEV;

	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
				 GFP_KERNEL);
	if (!vmd->irqs)
		return -ENOMEM;

	vmd->msix_entries = devm_kcalloc(&dev->dev, vmd->msix_count,
					 sizeof(*vmd->msix_entries),
					 GFP_KERNEL);
	if (!vmd->msix_entries)
		return -ENOMEM;
	for (i = 0; i < vmd->msix_count; i++)
		vmd->msix_entries[i].entry = i;

	vmd->msix_count = pci_enable_msix_range(vmd->dev, vmd->msix_entries, 1,
						vmd->msix_count);
	if (vmd->msix_count < 0)
		return vmd->msix_count;

	for (i = 0; i < vmd->msix_count; i++) {
		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
		vmd->irqs[i].vmd_vector = vmd->msix_entries[i].vector;
		vmd->irqs[i].index = i;

		err = devm_request_irq(&dev->dev, vmd->irqs[i].vmd_vector,
				       vmd_irq, 0, "vmd", &vmd->irqs[i]);
		if (err)
			return err;
	}

	spin_lock_init(&vmd->cfg_lock);
	pci_set_drvdata(dev, vmd);
	err = vmd_enable_domain(vmd);
	if (err)
		return err;

	dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n",
		 vmd->sysdata.domain);
	return 0;
}

static void vmd_remove(struct pci_dev *dev)
{
	struct vmd_dev *vmd = pci_get_drvdata(dev);

	pci_set_drvdata(dev, NULL);
	sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
	pci_stop_root_bus(vmd->bus);
	pci_remove_root_bus(vmd->bus);
	vmd_teardown_dma_ops(vmd);
	irq_domain_remove(vmd->irq_domain);
}

#ifdef CONFIG_PM
static int vmd_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	pci_save_state(pdev);
	return 0;
}

static int vmd_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	pci_restore_state(pdev);
	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);

static const struct pci_device_id vmd_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x201d),},
	{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);

static struct pci_driver vmd_drv = {
	.name		= "vmd",
	.id_table	= vmd_ids,
	.probe		= vmd_probe,
	.remove		= vmd_remove,
	.driver		= {
		.pm	= &vmd_dev_pm_ops,
	},
};
module_pci_driver(vmd_drv);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.6");