/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Allen M. Kay <allen.m.kay@intel.com>
 * Author: Weidong Han <weidong.han@intel.com>
 * Author: Ben-Ami Yassour <benami@il.ibm.com>
 */

#include <linux/list.h>
#include <linux/kvm_host.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/iommu.h>
#include "assigned-dev.h"

static bool allow_unsafe_assigned_interrupts;
module_param_named(allow_unsafe_assigned_interrupts,
		   allow_unsafe_assigned_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_assigned_interrupts,
 "Enable device assignment on platforms without interrupt remapping support.");

static int kvm_iommu_unmap_memslots(struct kvm *kvm);
static void kvm_iommu_put_pages(struct kvm *kvm,
				gfn_t base_gfn, unsigned long npages);
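
/*
 * Pin the host pages backing npages guest frames starting at gfn and
 * return the pfn of the first page, or an error pfn on failure.
 */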
static kvm_pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
			       unsigned long npages)
{
	gfn_t end_gfn;
	kvm_pfn_t pfn;

	pfn     = gfn_to_pfn_memslot(slot, gfn);
	end_gfn = gfn + npages;
	gfn    += 1;

	if (is_error_noslot_pfn(pfn))
		return pfn;

	while (gfn < end_gfn)
		gfn_to_pfn_memslot(slot, gfn++);

	return pfn;
}
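
/* Release the pin taken on a contiguous range of npages host pages. */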
static void kvm_unpin_pages(struct kvm *kvm, kvm_pfn_t pfn,
			    unsigned long npages)
{
	unsigned long i;

	for (i = 0; i < npages; ++i)
		kvm_release_pfn_clean(pfn + i);
}
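
/*
 * Map a whole memslot into the IOMMU domain, picking the largest page
 * size that the host backing, the gfn alignment and the hva alignment
 * all allow. Pages are pinned up front because teardown later unmaps
 * and unpins in 4kb steps.
 */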
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	gfn_t gfn, end_gfn;
	kvm_pfn_t pfn;
	int r = 0;
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	int flags;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	gfn     = slot->base_gfn;
	end_gfn = gfn + slot->npages;

	flags = IOMMU_READ;
	if (!(slot->flags & KVM_MEM_READONLY))
		flags |= IOMMU_WRITE;
	if (!kvm->arch.iommu_noncoherent)
		flags |= IOMMU_CACHE;

	while (gfn < end_gfn) {
		unsigned long page_size;

		/* Check if already mapped */
		if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
			gfn += 1;
			continue;
		}

		/* Get the page size we could use to map */
		page_size = kvm_host_page_size(kvm, gfn);

		/* Make sure the page_size does not exceed the memslot */
		while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
			page_size >>= 1;

		/* Make sure gfn is aligned to the page size we want to map */
		while ((gfn << PAGE_SHIFT) & (page_size - 1))
			page_size >>= 1;

		/* Make sure hva is aligned to the page size we want to map */
		while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
			page_size >>= 1;

		/*
		 * Pin all pages we are about to map in memory. This is
		 * important because we unmap and unpin in 4kb steps later.
		 */
		pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
		if (is_error_noslot_pfn(pfn)) {
			gfn += 1;
			continue;
		}

		/* Map into IO address space */
		r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
			      page_size, flags);
		if (r) {
			printk(KERN_ERR "kvm_iommu_map_address: "
			       "iommu failed to map pfn=%llx\n", pfn);
			kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
			goto unmap_pages;
		}

		gfn += page_size >> PAGE_SHIFT;

		cond_resched();
	}

	return 0;

unmap_pages:
	kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
	return r;
}
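
/* Map every memslot of the VM into the IOMMU domain. */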
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
	int idx, r = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	if (kvm->arch.iommu_noncoherent)
		kvm_arch_register_noncoherent_dma(kvm);

	idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);

	kvm_for_each_memslot(memslot, slots) {
		r = kvm_iommu_map_pages(kvm, memslot);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, idx);

	return r;
}
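
/*
 * Attach an assigned PCI device to the VM's IOMMU domain. If the
 * device's cache-coherency capability differs from what the current
 * mappings assume, guest memory is remapped with updated flags first.
 */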
int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	int r;
	bool noncoherent;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	if (pdev == NULL)
		return -ENODEV;

	r = iommu_attach_device(domain, &pdev->dev);
	if (r) {
		dev_err(&pdev->dev, "kvm assign device failed ret %d", r);
		return r;
	}

	noncoherent = !iommu_capable(&pci_bus_type, IOMMU_CAP_CACHE_COHERENCY);

	/* Check if need to update IOMMU page table for guest memory */
	if (noncoherent != kvm->arch.iommu_noncoherent) {
		kvm_iommu_unmap_memslots(kvm);
		kvm->arch.iommu_noncoherent = noncoherent;
		r = kvm_iommu_map_memslots(kvm);
		if (r)
			goto out_unmap;
	}

	kvm_arch_start_assignment(kvm);
	pci_set_dev_assigned(pdev);

	dev_info(&pdev->dev, "kvm assign device\n");

	return 0;
out_unmap:
	kvm_iommu_unmap_memslots(kvm);
	return r;
}
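
/* Detach an assigned PCI device from the VM's IOMMU domain. */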
int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	if (pdev == NULL)
		return -ENODEV;

	iommu_detach_device(domain, &pdev->dev);

	pci_clear_dev_assigned(pdev);
	kvm_arch_end_assignment(kvm);

	dev_info(&pdev->dev, "kvm deassign device\n");

	return 0;
}
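
/*
 * Allocate the VM's IOMMU domain and map all guest memory into it.
 * Fails unless the IOMMU supports interrupt remapping or the
 * allow_unsafe_assigned_interrupts module parameter is set.
 */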
int kvm_iommu_map_guest(struct kvm *kvm)
{
	int r;

	if (!iommu_present(&pci_bus_type)) {
		printk(KERN_ERR "%s: iommu not found\n", __func__);
		return -ENODEV;
	}

	mutex_lock(&kvm->slots_lock);

	kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
	if (!kvm->arch.iommu_domain) {
		r = -ENOMEM;
		goto out_unlock;
	}

	if (!allow_unsafe_assigned_interrupts &&
	    !iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP)) {
		printk(KERN_WARNING "%s: No interrupt remapping support,"
		       " disallowing device assignment."
		       " Re-enable with \"allow_unsafe_assigned_interrupts=1\""
		       " module option.\n", __func__);
		iommu_domain_free(kvm->arch.iommu_domain);
		kvm->arch.iommu_domain = NULL;
		r = -EPERM;
		goto out_unlock;
	}

	r = kvm_iommu_map_memslots(kvm);
	if (r)
		kvm_iommu_unmap_memslots(kvm);

out_unlock:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
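
/*
 * Unmap and unpin npages guest frames starting at base_gfn. Although
 * iommu_unmap() is asked for a single page at a time, the IOMMU driver
 * may unmap a larger mapping; every page it reports as unmapped is
 * unpinned.
 */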
static void kvm_iommu_put_pages(struct kvm *kvm,
				gfn_t base_gfn, unsigned long npages)
{
	struct iommu_domain *domain;
	gfn_t end_gfn, gfn;
	kvm_pfn_t pfn;
	u64 phys;

	domain  = kvm->arch.iommu_domain;
	end_gfn = base_gfn + npages;
	gfn     = base_gfn;

	/* check if iommu exists and in use */
	if (!domain)
		return;

	while (gfn < end_gfn) {
		unsigned long unmap_pages;
		size_t size;

		/* Get physical address */
		phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));

		if (!phys) {
			gfn++;
			continue;
		}

		pfn = phys >> PAGE_SHIFT;

		/* Unmap address from IO address space */
		size        = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
		unmap_pages = 1ULL << get_order(size);

		/* Unpin all pages we just unmapped to not leak any memory */
		kvm_unpin_pages(kvm, pfn, unmap_pages);

		gfn += unmap_pages;

		cond_resched();
	}
}
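
/* Unmap and unpin all pages backing a single memslot. */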
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
}
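
/* Unmap every memslot and drop the VM's noncoherent-DMA registration. */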
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
	int idx;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);

	kvm_for_each_memslot(memslot, slots)
		kvm_iommu_unmap_pages(kvm, memslot);

	srcu_read_unlock(&kvm->srcu, idx);

	if (kvm->arch.iommu_noncoherent)
		kvm_arch_unregister_noncoherent_dma(kvm);

	return 0;
}
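
/* Tear down all IOMMU mappings for the VM and free its domain. */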
int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	mutex_lock(&kvm->slots_lock);
	kvm_iommu_unmap_memslots(kvm);
	kvm->arch.iommu_domain = NULL;
	kvm->arch.iommu_noncoherent = false;
	mutex_unlock(&kvm->slots_lock);

	iommu_domain_free(domain);

	return 0;
}