aboutsummaryrefslogtreecommitdiffstats
path: root/com32/include/x86/cpu.h
diff options
context:
space:
mode:
authorH. Peter Anvin <hpa@zytor.com>2019-02-04 12:16:57 -0800
committerH. Peter Anvin <hpa@zytor.com>2019-02-04 12:16:57 -0800
commit35badfce1f4855a32d089e1a9d8c098e3ef343c7 (patch)
treeb114358b6bac8eb97d7b3bf395e7eaeb7a1de1e5 /com32/include/x86/cpu.h
parent621770d33e6da95556ccfc4f08fd16edb8300c63 (diff)
downloadsyslinux-35badfce1f4855a32d089e1a9d8c098e3ef343c7.tar.gz
syslinux-35badfce1f4855a32d089e1a9d8c098e3ef343c7.tar.xz
syslinux-35badfce1f4855a32d089e1a9d8c098e3ef343c7.zip
Reorganize and clean up a bunch of the x86 code
We were doing a bunch of i386-specific things even on x86-64. Fix this, and merge x86 definitions where possible. Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'com32/include/x86/cpu.h')
-rw-r--r--com32/include/x86/cpu.h206
1 files changed, 206 insertions, 0 deletions
diff --git a/com32/include/x86/cpu.h b/com32/include/x86/cpu.h
new file mode 100644
index 00000000..5bdc74de
--- /dev/null
+++ b/com32/include/x86/cpu.h
@@ -0,0 +1,206 @@
+#ifndef _X86_CPU_H
+#define _X86_CPU_H
+
+#include <klibc/compiler.h>
+#include <inttypes.h>
+#include <stdbool.h>
+
/*
 * Test whether a given EFLAGS bit can be toggled by software.
 *
 * Reads EFLAGS, writes it back with 'flag' XORed in, then reads it
 * again and reports whether the bit actually changed.  The outer
 * pushf/popf pair preserves the caller's original flags.  Classic use:
 * probing the ID flag (bit 21) to detect CPUID support on old CPUs.
 *
 * @flag: EFLAGS bit mask to test
 * Returns true if the bit is software-toggleable on this CPU.
 */
static inline bool cpu_has_eflag(unsigned long flag)
{
    unsigned long f1, f2;

    asm("pushf ; "              /* save original EFLAGS */
        "pushf ; "
        "pop %0 ; "             /* f1 = current EFLAGS */
        "mov %0,%1 ; "
        "xor %2,%1 ; "          /* f2 = f1 ^ flag */
        "push %1 ; "
        "popf ; "               /* attempt to install toggled value */
        "pushf ; "
        "pop %1 ; "             /* f2 = EFLAGS as actually accepted */
        "popf"                  /* restore original EFLAGS */
        : "=r" (f1), "=r" (f2) : "ri" (flag));

    return !!((f1 ^ f2) & flag);
}
+
/*
 * Read the full 64-bit time-stamp counter.  RDTSC delivers the low
 * half in EAX and the high half in EDX; recombine into one uint64_t.
 */
static inline uint64_t rdtsc(void)
{
    uint32_t eax, edx;
    asm volatile("rdtsc" : "=a" (eax), "=d" (edx));
    return eax + ((uint64_t)edx << 32);
}
+
/*
 * Read only the low 32 bits of the time-stamp counter.  RDTSC still
 * writes EDX, hence the clobber even though the value is discarded.
 */
static inline uint32_t rdtscl(void)
{
    uint32_t v;
    asm volatile("rdtsc" : "=a" (v) : : "edx");
    return v;
}
+
/*
 * Execute CPUID for leaf 'op' and subleaf 'cnt', returning all four
 * result registers through the output pointers.
 *
 * On i386 PIC builds EBX holds the GOT pointer and may not appear as
 * an asm output, so the result is routed through ESI or EDI ("=SD")
 * and EBX is saved/restored around the CPUID with mov/xchg.
 */
static inline void cpuid_count(uint32_t op, uint32_t cnt,
                               uint32_t *eax, uint32_t *ebx,
                               uint32_t *ecx, uint32_t *edx)
{
#if defined(__i386__) && defined(__PIC__)
    asm volatile("movl %%ebx,%1 ; "
                 "cpuid ; "
                 "xchgl %1,%%ebx"
                 : "=a" (*eax), "=SD" (*ebx), "=c" (*ecx), "=d" (*edx)
                 : "a"(op), "c"(cnt));
#else
    asm volatile("cpuid"
                 : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
                 : "a"(op), "c"(cnt));
#endif
}
+
/*
 * Plain CPUID query: leaf 'op' with the subleaf (ECX) fixed at zero,
 * which is the common single-leaf case.
 */
static inline void cpuid(uint32_t op,
                         uint32_t *eax, uint32_t *ebx,
                         uint32_t *ecx, uint32_t *edx)
{
    /* Delegate to the subleaf-aware variant with subleaf 0 */
    cpuid_count(op, 0, eax, ebx, ecx, edx);
}
+
+static inline __constfunc uint32_t cpuid_eax(uint32_t level)
+{
+ uint32_t eax, ebx, ecx, edx;
+
+ cpuid_count(level, 0, &eax, &ebx, &ecx, &edx);
+ return eax;
+}
+
+static inline __constfunc uint32_t cpuid_ebx(uint32_t level)
+{
+ uint32_t eax, ebx, ecx, edx;
+
+ cpuid_count(level, 0, &eax, &ebx, &ecx, &edx);
+ return ebx;
+}
+
+static inline __constfunc uint32_t cpuid_ecx(uint32_t level)
+{
+ uint32_t eax, ebx, ecx, edx;
+
+ cpuid_count(level, 0, &eax, &ebx, &ecx, &edx);
+ return ecx;
+}
+
+static inline __constfunc uint32_t cpuid_edx(uint32_t level)
+{
+ uint32_t eax, ebx, ecx, edx;
+
+ cpuid_count(level, 0, &eax, &ebx, &ecx, &edx);
+ return edx;
+}
+
/*
 * Read a model-specific register (privileged).  RDMSR returns the
 * 64-bit MSR value in EDX:EAX.
 *
 * @msr: MSR index to read
 */
static inline uint64_t rdmsr(uint32_t msr)
{
    uint32_t eax, edx;

    asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (msr));
    return eax + ((uint64_t)edx << 32);
}
+
/*
 * Write a model-specific register (privileged).  The 64-bit value is
 * split across EDX:EAX as WRMSR requires.
 *
 * NOTE the argument order: value first, MSR index second.
 */
static inline void wrmsr(uint64_t v, uint32_t msr)
{
    uint32_t eax = v;
    uint32_t edx = v >> 32;

    asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (msr));
}
+
+static inline unsigned long long get_cr0(void)
+{
+ unsigned long v;
+ asm volatile("mov %%cr0,%0" : "=r" (v));
+ return v;
+}
+
/*
 * Write control register CR0 (privileged).
 */
static inline void set_cr0(unsigned long v)
{
    asm volatile("mov %0,%%cr0" : : "r" (v));
}
+
/*
 * Read control register CR4 (privileged).
 */
static inline unsigned long get_cr4(void)
{
    unsigned long v;
    asm volatile("mov %%cr4,%0" : "=r" (v));
    return v;
}
+
/*
 * Write control register CR4 (privileged).
 */
static inline void set_cr4(unsigned long v)
{
    asm volatile("mov %0,%%cr4" : : "r" (v));
}
+
/*
 * Read the SSE control/status register.  STMXCSR can only store to
 * memory, hence the "=m" output operand.
 */
static inline uint32_t get_mxcsr(void)
{
    uint32_t v;
    asm("stmxcsr %0" : "=m" (v));
    return v;
}
+
/*
 * Write the SSE control/status register.  LDMXCSR loads from memory,
 * hence the "m" input operand.
 */
static inline void set_mxcsr(uint32_t v)
{
    asm volatile("ldmxcsr %0" : : "m" (v));
}
+
/*
 * Reinitialize the x87 FPU without checking for pending unmasked
 * exceptions (FNINIT, as opposed to FINIT).
 */
static inline void fninit(void)
{
    asm volatile("fninit");
}
+
+static inline void cpu_relax(void)
+{
+ asm volatile("rep ; nop");
+}
+
/*
 * Halt the CPU until the next interrupt (privileged).  The memory
 * clobber keeps the compiler from caching memory values across the
 * wait.
 */
static inline void hlt(void)
{
    asm volatile("hlt" : : : "memory");
}
+
/*
 * Disable maskable interrupts (clear IF).  Memory clobber acts as a
 * compiler barrier so memory accesses are not moved across it.
 */
static inline void cli(void)
{
    asm volatile("cli" : : : "memory");
}
+
/*
 * Enable maskable interrupts (set IF).  Memory clobber acts as a
 * compiler barrier so memory accesses are not moved across it.
 */
static inline void sti(void)
{
    asm volatile("sti" : : : "memory");
}
+
/*
 * Return the current (E/R)FLAGS image via pushf/pop.
 */
static inline unsigned long get_eflags(void)
{
    unsigned long v;

    asm volatile("pushf ; pop %0" : "=rm" (v));
    return v;
}
+
/*
 * Install a full (E/R)FLAGS image via push/popf.  Memory clobber
 * because this can change IF and thus ordering-sensitive state.
 */
static inline void set_eflags(unsigned long v)
{
    asm volatile("push %0 ; popf" : : "g" (v) : "memory");
}
+
/* Opaque snapshot of the interrupt state — a saved EFLAGS image. */
typedef unsigned long irq_state_t;

/*
 * Capture the current interrupt state (the whole EFLAGS word; the IF
 * bit is the part irq_save()/irq_restore() care about).
 */
static inline irq_state_t irq_state(void)
{
    return get_eflags();
}
+
+static inline irq_state_t irq_save(void)
+{
+ irq_state_t v = irq_state();
+ cli();
+ return v;
+}
+
+static inline void irq_restore(irq_state_t v)
+{
+ set_eflags(v);
+}
+
+#endif /* _X86_CPU_H */