 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Portions Copyright (c) 2010, Oracle and/or its affiliates.
 * All rights reserved.
 */
/*
 * Copyright (c) 2009, Intel Corporation.
 * All rights reserved.
 */
/*
 * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
 * Copyright 2017 Joyent, Inc.
 */

/*
 * DVMA code
 * This file contains Intel IOMMU code that deals with DVMA,
 * i.e. DMA remapping.
 */

#include <sys/sysmacros.h>
#include <sys/pcie.h>
#include <sys/pci_cfgspace.h>
#include <vm/hat_i86.h>
#include <sys/memlist.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
#include <sys/modhash.h>
#include <sys/immu.h>
#include <sys/x86_archext.h>
#include <sys/archsystm.h>

#undef TEST

/*
 * Macros based on the PCI spec
 */
#define	IMMU_PCI_REV2CLASS(r)	((r) >> 8)	/* classcode from revid */
#define	IMMU_PCI_CLASS2BASE(c)	((c) >> 16)	/* baseclass from classcode */
#define	IMMU_PCI_CLASS2SUB(c)	(((c) >> 8) & 0xff)	/* subclass from classcode */

#define	IMMU_CONTIG_PADDR(d, p) \
	((d).dck_paddr && ((d).dck_paddr + (d).dck_npages * IMMU_PAGESIZE) \
	== (p))
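
/*
 * Illustrative sketch (not original code): IMMU_CONTIG_PADDR(d, p) asks
 * whether physical address p extends the run already accumulated in
 * dcookie d, i.e. whether p lies exactly one page past the cookie's
 * current end.  For example, with IMMU_PAGESIZE = 0x1000, a cookie with
 * dck_paddr = 0x1000 and dck_npages = 3 ends at 0x4000, so p = 0x4000
 * extends it while p = 0x5000 must start a new cookie.  An equivalent
 * function form:
 *
 *	static boolean_t
 *	immu_contig_paddr(immu_dcookie_t d, paddr_t p)
 *	{
 *		return (d.dck_paddr != 0 &&
 *		    d.dck_paddr + d.dck_npages * IMMU_PAGESIZE == p ?
 *		    B_TRUE : B_FALSE);
 *	}
 */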

typedef struct dvma_arg {
	immu_t *dva_immu;
	dev_info_t *dva_rdip;
	dev_info_t *dva_ddip;
	domain_t *dva_domain;
	int dva_level;
	immu_flags_t dva_flags;
	list_t *dva_list;
	int dva_error;
} dvma_arg_t;

static domain_t *domain_create(immu_t *immu, dev_info_t *ddip,
    dev_info_t *rdip, immu_flags_t immu_flags);
static immu_devi_t *create_immu_devi(dev_info_t *rdip, int bus,
    int dev, int func, immu_flags_t immu_flags);
static void destroy_immu_devi(immu_devi_t *immu_devi);
static boolean_t dvma_map(domain_t *domain, uint64_t sdvma,
    uint64_t nvpages, immu_dcookie_t *dcookies, int dcount, dev_info_t *rdip,
    immu_flags_t immu_flags);

/* ... */

	pgtable = xlate->xlt_pgtable;
	idx = xlate->xlt_idx;

	dvma = *dvma_ptr;
	nvpages = *nvpages_ptr;

	/*
	 * Since a caller gets a unique dvma for a physical address,
	 * no other concurrent thread will be writing to the same
	 * PTE, even if it has the same paddr.  So no locks are needed.
	 */
	shwp = (hw_pdte_t *)(pgtable->hwpg_vaddr) + idx;
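
	/*
	 * Illustrative note (not original code): hwpg_vaddr is the
	 * kernel mapping of one page-table page.  A 4K VT-d table page
	 * holds 512 64-bit hw_pdte_t entries, so idx runs from 0 to
	 * IMMU_PGTABLE_MAXIDX (511) and the line above is plain array
	 * indexing:
	 *
	 *	shwp = &((hw_pdte_t *)pgtable->hwpg_vaddr)[idx];
	 */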

	hwp = shwp;
	for (j = dcount - 1; j >= 0; j--) {
		if (nvpages <= dcookies[j].dck_npages)
			break;
		nvpages -= dcookies[j].dck_npages;
	}

	VERIFY(j >= 0);
	nppages = nvpages;
	paddr = dcookies[j].dck_paddr +
	    (dcookies[j].dck_npages - nppages) * IMMU_PAGESIZE;
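
	/*
	 * Worked example (illustrative, not original code): with three
	 * dcookies of dck_npages {4, 2, 3} and nvpages = 4, the
	 * backwards walk above stops at j = 1 with nppages = 1: the
	 * four still-unmapped pages are the last page of cookie 1 plus
	 * all three pages of cookie 2, so paddr starts at
	 * dcookies[1].dck_paddr + 1 * IMMU_PAGESIZE.
	 */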

	nvpages = *nvpages_ptr;
	nset = 0;
	for (; nvpages > 0 && idx <= IMMU_PGTABLE_MAXIDX; idx++, hwp++) {
		PTE_set_one(immu, hwp, paddr, rdip, immu_flags);
		nset++;

		ASSERT(PDTE_check(immu, *hwp, NULL, paddr, rdip, immu_flags)
		    == B_TRUE);
		nppages--;
		nvpages--;
		paddr += IMMU_PAGESIZE;
		dvma += IMMU_PAGESIZE;

		if (nppages == 0) {
			j++;
		}
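
	/*
	 * (Excerpt ends mid-loop.)  From the surrounding logic, the
	 * elided remainder of this loop body advances to the next
	 * dcookie once the current one is exhausted, reloading paddr
	 * and nppages from dcookies[j] before the next iteration; a
	 * sketch of the assumed continuation:
	 *
	 *	if (j < dcount) {
	 *		paddr = dcookies[j].dck_paddr;
	 *		nppages = dcookies[j].dck_npages;
	 *	}
	 */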

/* ... */

	pde_set = 0;
	npages = 1;
	size -= psize;
	while (size > 0) {
		/* get the size for this page (i.e. partial or full page) */
		psize = MIN(size, MMU_PAGESIZE);
		if (buftype == DMA_OTYP_PAGES) {
			/* get the paddr from the page_t */
			paddr = pfn_to_pa(page->p_pagenum);
			page = page->p_next;
		} else if (pparray != NULL) {
			/* index into the array of page_t's to get the paddr */
			paddr = pfn_to_pa(pparray[pcnt]->p_pagenum);
			pcnt++;
		} else {
			/* call into the VM to get the paddr */
			paddr = pfn_to_pa(hat_getpfnum(vas->a_hat, vaddr));
			vaddr += psize;
		}
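
		/*
		 * Illustrative note (not original code): all three
		 * branches reduce to pfn_to_pa(pfn), i.e. the page
		 * frame number shifted left by MMU_PAGESHIFT; they
		 * differ only in where the pfn comes from: a page_t
		 * chain, a page_t array, or a HAT lookup on the
		 * virtual address.
		 */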

		if (ihp->ihp_npremapped > 0) {
			*ihp->ihp_preptes[npages] =
			    PDTE_PADDR(paddr) | rwmask;
		} else if (IMMU_CONTIG_PADDR(dcookies[dmax], paddr)) {
			dcookies[dmax].dck_npages++;
		} else {
			/* Not contiguous; we need a new dcookie */
			if (dmax == (IMMU_NDCK - 1)) {
				/*
				 * Ran out of dcookies. Map them now.
				 */
				if (dvma_map(domain, dvma,
				    npages, dcookies, dmax + 1, rdip,
				    immu_flags))
					pde_set++;

				IMMU_DPROBE4(immu__dvmamap__early,
				    dev_info_t *, rdip, uint64_t, dvma,
				    uint_t, npages, uint_t, dmax + 1);

				dvma += (npages << IMMU_PAGESHIFT);
				npages = 0;
				dmax = 0;
			} else {
				dmax++;
			}
			dcookies[dmax].dck_paddr = paddr;
			dcookies[dmax].dck_npages = 1;
		}
		size -= psize;
		if (npages != 0)
			npages++;
	}
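
	/*
	 * Illustrative note (not original code): dcookies[] batches up
	 * to IMMU_NDCK contiguous physical ranges before they are
	 * flushed to the page tables.  E.g. with IMMU_NDCK = 4 (a
	 * value assumed here purely for illustration), pages at
	 * 0x1000, 0x2000, 0x8000, 0xa000, 0xb000 would accumulate as
	 * the cookies {0x1000, 2}, {0x8000, 1}, {0xa000, 2}.  Only
	 * when all slots are full and another non-contiguous page
	 * arrives does the loop map the batch early via dvma_map()
	 * and restart at dmax = 0.
	 */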

	/*
	 * Finish up, mapping all, or all of the remaining,
	 * physical memory ranges.
	 */
	if (ihp->ihp_npremapped == 0 && npages > 0) {
		IMMU_DPROBE4(immu__dvmamap__late, dev_info_t *, rdip,
		    uint64_t, dvma, uint_t, npages, uint_t, dmax + 1);

		if (dvma_map(domain, dvma, npages, dcookies,
		    dmax + 1, rdip, immu_flags))
			pde_set++;
	}

	/* Invalidate the IOTLB */
	immu_flush_iotlb_psi(immu, domain->dom_did, sdvma, npgalloc,
	    pde_set > 0 ? TLB_IVA_WHOLE : TLB_IVA_LEAF,
	    &ihp->ihp_inv_wait);
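
	/*
	 * Illustrative note (not original code): pde_set counts the
	 * dvma_map() calls that reported installing new non-leaf
	 * (page-directory) entries, so the page-selective IOTLB
	 * invalidation above must also flush cached non-leaf entries
	 * (TLB_IVA_WHOLE); otherwise invalidating only leaf entries
	 * (TLB_IVA_LEAF) suffices.
	 */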