Glauber de Oliveira Costa | 53fd13cf | 2008-01-30 13:32:10 +0100 | [diff] [blame] | 1 | #include <asm/paravirt.h> |
| 2 | #include <asm/asm-offsets.h> |
Glauber de Oliveira Costa | 8a650ce | 2008-01-30 13:33:19 +0100 | [diff] [blame] | 3 | #include <linux/stringify.h> |
Glauber de Oliveira Costa | 53fd13cf | 2008-01-30 13:32:10 +0100 | [diff] [blame] | 4 | |
/*
 * Native (non-paravirtualized) instruction sequences used by the
 * boot-time patcher below.  DEF_NATIVE(ops, name, code) assembles
 * "code" and exports it between start_<ops>_<name> and end_<ops>_<name>
 * symbols; native_patch() copies those bytes directly over the indirect
 * pv_ops call sites when running on bare metal.
 */
DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
DEF_NATIVE(pv_cpu_ops, clts, "clts");
DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");

/* Kernel->user return paths: restore the user GS base before exiting. */
DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "swapgs; sti; sysexit");
DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
DEF_NATIVE(pv_cpu_ops, usergs_sysret32, "swapgs; sysretl");
DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");

/*
 * Identity sequences: return the first argument unchanged (32- and
 * 64-bit).  Used by paravirt_patch_ident_{32,64}() below.
 */
DEF_NATIVE(, mov32, "mov %edi, %eax");
DEF_NATIVE(, mov64, "mov %rdi, %rax");

#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
/* Native queued spinlock unlock: clear the locked byte. */
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
#endif
| 27 | |
Jeremy Fitzhardinge | 41edafd | 2009-01-28 14:35:02 -0800 | [diff] [blame] | 28 | unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len) |
| 29 | { |
| 30 | return paravirt_patch_insns(insnbuf, len, |
| 31 | start__mov32, end__mov32); |
| 32 | } |
| 33 | |
| 34 | unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len) |
| 35 | { |
| 36 | return paravirt_patch_insns(insnbuf, len, |
| 37 | start__mov64, end__mov64); |
| 38 | } |
| 39 | |
/* Defined by the paravirt spinlock code; true when the native
 * queued_spin_unlock has not been replaced by a hypervisor variant. */
extern bool pv_is_native_spin_unlock(void);

/*
 * Patch one pv_ops call site of the given @type: if a native inline
 * replacement exists (see the DEF_NATIVE definitions above), copy its
 * bytes into @ibuf; otherwise defer to paravirt_patch_default().
 * Returns the number of bytes actually patched (at most @len).
 *
 * @type:     PARAVIRT_PATCH() index identifying which op this site calls
 * @clobbers: registers the site may clobber (passed through to the
 *            default patcher)
 * @ibuf:     instruction buffer to write the replacement code into
 * @addr:     address of the call site (passed through to the default
 *            patcher)
 * @len:      size of the site/buffer in bytes
 */
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len)
{
	const unsigned char *start, *end;
	unsigned ret;

/* Map a patch-site type to the start/end symbols DEF_NATIVE emitted,
 * then jump to the shared copy code below. */
#define PATCH_SITE(ops, x)					\
		case PARAVIRT_PATCH(ops.x):			\
			start = start_##ops##_##x;		\
			end = end_##ops##_##x;			\
			goto patch_site
	switch(type) {
		PATCH_SITE(pv_irq_ops, restore_fl);
		PATCH_SITE(pv_irq_ops, save_fl);
		PATCH_SITE(pv_irq_ops, irq_enable);
		PATCH_SITE(pv_irq_ops, irq_disable);
		PATCH_SITE(pv_cpu_ops, usergs_sysret32);
		PATCH_SITE(pv_cpu_ops, usergs_sysret64);
		PATCH_SITE(pv_cpu_ops, swapgs);
		PATCH_SITE(pv_mmu_ops, read_cr2);
		PATCH_SITE(pv_mmu_ops, read_cr3);
		PATCH_SITE(pv_mmu_ops, write_cr3);
		PATCH_SITE(pv_cpu_ops, clts);
		PATCH_SITE(pv_mmu_ops, flush_tlb_single);
		PATCH_SITE(pv_cpu_ops, wbinvd);
#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
	case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
		/* Only inline the native unlock; a hypervisor-provided
		 * unlock must go through the default patcher. */
		if (pv_is_native_spin_unlock()) {
			start = start_pv_lock_ops_queued_spin_unlock;
			end   = end_pv_lock_ops_queued_spin_unlock;
			goto patch_site;
		}
		/* fallthrough: non-native unlock handled by default */
#endif

	default:
		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
		break;

patch_site:
		/* Shared tail for all PATCH_SITE cases: copy the native
		 * snippet [start, end) into the site if it fits. */
		ret = paravirt_patch_insns(ibuf, len, start, end);
		break;
	}
#undef PATCH_SITE
	return ret;
}