/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/processor-flags.h>
#include <asm/msr-index.h>
#include <asm/frame.h>

	.text
	.code64
ENTRY(sme_encrypt_execute)

	/*
	 * Entry parameters:
	 *   RDI - virtual address for the encrypted kernel mapping
	 *   RSI - virtual address for the decrypted kernel mapping
	 *   RDX - length of kernel
	 *   RCX - virtual address of the encryption workarea, including:
	 *     - stack page (PAGE_SIZE)
	 *     - encryption routine page (PAGE_SIZE)
	 *     - intermediate copy buffer (PMD_PAGE_SIZE)
	 *    R8 - physical address of the pagetables to use for encryption
	 */

	FRAME_BEGIN			/* RBP now has original stack pointer */

	/* Set up a one page stack in the non-encrypted memory area */
	movq	%rcx, %rax		/* Workarea stack page */
	leaq	PAGE_SIZE(%rax), %rsp	/* Set new stack pointer */
	addq	$PAGE_SIZE, %rax	/* Workarea encryption routine */
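	/*
	 * RAX now points to the second workarea page, immediately after the
	 * stack page; the encryption routine is copied there below and then
	 * executed from that non-encrypted area.
	 */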

	push	%r12
	movq	%rdi, %r10		/* Encrypted kernel */
	movq	%rsi, %r11		/* Decrypted kernel */
	movq	%rdx, %r12		/* Kernel length */

	/* Copy encryption routine into the workarea */
	movq	%rax, %rdi		/* Workarea encryption routine */
	leaq	__enc_copy(%rip), %rsi	/* Encryption routine */
	movq	$(.L__enc_copy_end - __enc_copy), %rcx	/* Encryption routine length */
	rep	movsb
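	/*
	 * REP MOVSB consumed RDI, RSI and RCX, which is why the kernel
	 * addresses were stashed in R10/R11 above.  The kernel length was
	 * saved in R12 since RDX is reloaded with the pagetable address
	 * for the call below.
	 */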

	/* Setup registers for call */
	movq	%r10, %rdi		/* Encrypted kernel */
	movq	%r11, %rsi		/* Decrypted kernel */
	movq	%r8, %rdx		/* Pagetables used for encryption */
	movq	%r12, %rcx		/* Kernel length */
	movq	%rax, %r8		/* Workarea encryption routine */
	addq	$PAGE_SIZE, %r8		/* Workarea intermediate copy buffer */

	call	*%rax			/* Call the encryption routine */
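	/*
	 * The indirect call above transfers control to the workarea copy of
	 * __enc_copy, so the encryption loop runs from non-encrypted memory
	 * while the kernel image itself is being encrypted.
	 */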

	pop	%r12

	movq	%rbp, %rsp		/* Restore original stack pointer */
	FRAME_END

	ret
ENDPROC(sme_encrypt_execute)

ENTRY(__enc_copy)
/*
 * Routine used to encrypt the kernel.
 *   This routine must be run outside of the kernel proper since
 *   the kernel will be encrypted during the process. So this
 *   routine is defined here and then copied to an area outside
 *   of the kernel where it will remain and run decrypted
 *   during execution.
 *
 *   On entry the registers must be:
 *     RDI - virtual address for the encrypted kernel mapping
 *     RSI - virtual address for the decrypted kernel mapping
 *     RDX - address of the pagetables to use for encryption
 *     RCX - length of kernel
 *      R8 - intermediate copy buffer
 *
 *     RAX - points to this routine
 *
 * The kernel will be encrypted by copying from the non-encrypted
 * kernel space to an intermediate buffer and then copying from the
 * intermediate buffer back to the encrypted kernel space. The physical
 * addresses of the two kernel space mappings are the same, which
 * results in the kernel being encrypted "in place".
 */
	/* Enable the new page tables */
	mov	%rdx, %cr3
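	/*
	 * The pagetables provided by the caller must map the encrypted and
	 * decrypted kernel ranges as well as the workarea this routine is
	 * executing from; they take effect with the CR3 write above.
	 */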

	/* Flush any global TLBs */
	mov	%cr4, %rdx
	andq	$~X86_CR4_PGE, %rdx
	mov	%rdx, %cr4
	orq	$X86_CR4_PGE, %rdx
	mov	%rdx, %cr4
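	/*
	 * Clearing and then re-setting CR4.PGE flushes the entire TLB,
	 * including global entries, so no stale translations from the
	 * previous pagetables remain.
	 */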

	/* Set the PAT register PA5 entry to write-protect */
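	/*
	 * The PAT MSR is accessed as EDX:EAX; EDX holds entries PA4-PA7, so
	 * PA5 occupies bits 15:8 of EDX.  The encoding 0x05 selects the
	 * write-protect (WP) memory type.
	 */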
	push	%rcx
	movl	$MSR_IA32_CR_PAT, %ecx
	rdmsr
	push	%rdx			/* Save original PAT value */
	andl	$0xffff00ff, %edx	/* Clear PA5 */
	orl	$0x00000500, %edx	/* Set PA5 to WP */
	wrmsr
	pop	%rdx			/* RDX contains original PAT value */
	pop	%rcx

	movq	%rcx, %r9		/* Save kernel length */
	movq	%rdi, %r10		/* Save encrypted kernel address */
	movq	%rsi, %r11		/* Save decrypted kernel address */

	wbinvd				/* Invalidate any cache entries */
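	/*
	 * With SME, the same physical memory accessed with different
	 * encryption (C-bit) attributes can leave aliased cache lines, so
	 * the WBINVD above writes back and invalidates the caches before
	 * the data is copied through the two mappings.
	 */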

	/* Copy/encrypt 2MB at a time */
1:
	movq	%r11, %rsi		/* Source - decrypted kernel */
	movq	%r8, %rdi		/* Dest - intermediate copy buffer */
	movq	$PMD_PAGE_SIZE, %rcx	/* 2MB length */
	rep	movsb

	movq	%r8, %rsi		/* Source - intermediate copy buffer */
	movq	%r10, %rdi		/* Dest - encrypted kernel */
	movq	$PMD_PAGE_SIZE, %rcx	/* 2MB length */
	rep	movsb

	addq	$PMD_PAGE_SIZE, %r11
	addq	$PMD_PAGE_SIZE, %r10
	subq	$PMD_PAGE_SIZE, %r9	/* Kernel length decrement */
	jnz	1b			/* Kernel length not zero? */
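	/*
	 * The loop above requires the kernel length to be a multiple of
	 * PMD_PAGE_SIZE (2MB); the caller is expected to pass a suitably
	 * aligned length.
	 */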

	/* Restore PAT register */
	push	%rdx			/* Save original PAT value */
	movl	$MSR_IA32_CR_PAT, %ecx
	rdmsr
	pop	%rdx			/* Restore original PAT value */
	wrmsr

	ret
.L__enc_copy_end:
ENDPROC(__enc_copy)