author    H. Peter Anvin <hpa@zytor.com>  2019-02-04 12:16:57 -0800
committer H. Peter Anvin <hpa@zytor.com>  2019-02-04 12:16:57 -0800
commit    35badfce1f4855a32d089e1a9d8c098e3ef343c7 (patch)
tree      b114358b6bac8eb97d7b3bf395e7eaeb7a1de1e5 /com32/include/x86
parent    621770d33e6da95556ccfc4f08fd16edb8300c63 (diff)
Reorganize and clean up a bunch of the x86 code
We were doing a bunch of i386-specific things even on x86-64. Fix this,
and merge x86 definitions where possible.

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'com32/include/x86')
-rw-r--r--  com32/include/x86/bitops.h |  70
-rw-r--r--  com32/include/x86/cpu.h    | 206
-rw-r--r--  com32/include/x86/regs.h   |  66
3 files changed, 342 insertions(+), 0 deletions(-)
diff --git a/com32/include/x86/bitops.h b/com32/include/x86/bitops.h
new file mode 100644
index 00000000..7667b6bf
--- /dev/null
+++ b/com32/include/x86/bitops.h
@@ -0,0 +1,70 @@
+/* ----------------------------------------------------------------------- *
+ *
+ * Copyright 2010-2011 Intel Corporation; author: H. Peter Anvin
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall
+ * be included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * x86/bitops.h
+ *
+ * Simple bitwise operations
+ */
+
+#ifndef _X86_BITOPS_H
+#define _X86_BITOPS_H
+
+#include <klibc/compiler.h>  /* for __purefunc, used by test_bit below */
+
+static inline void set_bit(long __bit, void *__bitmap)
+{
+ asm volatile("bts %1,%0"
+ : "+m" (*(unsigned char *)__bitmap)
+ : "Ir" (__bit) : "memory");
+}
+
+static inline void clr_bit(long __bit, void *__bitmap)
+{
+ asm volatile("btc %1,%0"
+ : "+m" (*(unsigned char *)__bitmap)
+ : "Ir" (__bit) : "memory");
+}
+
+static inline _Bool __purefunc test_bit(long __bit, const void *__bitmap)
+{
+ _Bool __r;
+
+#ifdef __GCC_ASM_FLAG_OUTPUTS__
+ asm("bt %2,%1"
+ : "=@ccc" (__r)
+ : "m" (*(const unsigned char *)__bitmap), "Ir" (__bit));
+#else
+ asm("bt %2,%1; setc %0"
+ : "=qm" (__r)
+ : "m" (*(const unsigned char *)__bitmap), "Ir" (__bit));
+#endif
+
+ return __r;
+}
+
+#endif /* _X86_BITOPS_H */
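Illustrative usage sketch for the new header (not from the patch itself; the
flags[] array and bitops_demo() are hypothetical). The helpers take a bit
index plus a base pointer, and bts/btr/bt with a register offset can address
bits past the first byte, so one bitmap of any length works from one base:

#include <x86/bitops.h>

static unsigned char flags[8];     /* 64-bit bitmap, zero-initialized */

static void bitops_demo(void)
{
    set_bit(5, flags);             /* bit 5 of flags[0] -> 1 */
    set_bit(42, flags);            /* lands in flags[5]: 42 = 5*8 + 2 */
    clr_bit(5, flags);             /* bit 5 back to 0 */

    if (test_bit(42, flags)) {
        /* bit 42 is set; test_bit returns the carry flag from bt */
    }
}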
diff --git a/com32/include/x86/cpu.h b/com32/include/x86/cpu.h
new file mode 100644
index 00000000..5bdc74de
--- /dev/null
+++ b/com32/include/x86/cpu.h
@@ -0,0 +1,206 @@
+#ifndef _X86_CPU_H
+#define _X86_CPU_H
+
+#include <klibc/compiler.h>
+#include <inttypes.h>
+#include <stdbool.h>
+
+static inline bool cpu_has_eflag(unsigned long flag)
+{
+ unsigned long f1, f2;
+
+ asm("pushf ; "
+ "pushf ; "
+ "pop %0 ; "
+ "mov %0,%1 ; "
+ "xor %2,%1 ; "
+ "push %1 ; "
+ "popf ; "
+ "pushf ; "
+ "pop %1 ; "
+ "popf"
+ : "=r" (f1), "=r" (f2) : "ri" (flag));
+
+ return !!((f1 ^ f2) & flag);
+}
+
+static inline uint64_t rdtsc(void)
+{
+ uint32_t eax, edx;
+ asm volatile("rdtsc" : "=a" (eax), "=d" (edx));
+ return eax + ((uint64_t)edx << 32);
+}
+
+static inline uint32_t rdtscl(void)
+{
+ uint32_t v;
+ asm volatile("rdtsc" : "=a" (v) : : "edx");
+ return v;
+}
+
+static inline void cpuid_count(uint32_t op, uint32_t cnt,
+ uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx)
+{
+#if defined(__i386__) && defined(__PIC__)
+ asm volatile("movl %%ebx,%1 ; "
+ "cpuid ; "
+ "xchgl %1,%%ebx"
+ : "=a" (*eax), "=SD" (*ebx), "=c" (*ecx), "=d" (*edx)
+ : "a"(op), "c"(cnt));
+#else
+ asm volatile("cpuid"
+ : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
+ : "a"(op), "c"(cnt));
+#endif
+}
+
+static inline void cpuid(uint32_t op,
+ uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx)
+{
+ cpuid_count(op, 0, eax, ebx, ecx, edx);
+}
+
+static inline __constfunc uint32_t cpuid_eax(uint32_t level)
+{
+ uint32_t eax, ebx, ecx, edx;
+
+ cpuid_count(level, 0, &eax, &ebx, &ecx, &edx);
+ return eax;
+}
+
+static inline __constfunc uint32_t cpuid_ebx(uint32_t level)
+{
+ uint32_t eax, ebx, ecx, edx;
+
+ cpuid_count(level, 0, &eax, &ebx, &ecx, &edx);
+ return ebx;
+}
+
+static inline __constfunc uint32_t cpuid_ecx(uint32_t level)
+{
+ uint32_t eax, ebx, ecx, edx;
+
+ cpuid_count(level, 0, &eax, &ebx, &ecx, &edx);
+ return ecx;
+}
+
+static inline __constfunc uint32_t cpuid_edx(uint32_t level)
+{
+ uint32_t eax, ebx, ecx, edx;
+
+ cpuid_count(level, 0, &eax, &ebx, &ecx, &edx);
+ return edx;
+}
+
+static inline uint64_t rdmsr(uint32_t msr)
+{
+ uint32_t eax, edx;
+
+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (msr));
+ return eax + ((uint64_t)edx << 32);
+}
+
+static inline void wrmsr(uint64_t v, uint32_t msr)
+{
+ uint32_t eax = v;
+ uint32_t edx = v >> 32;
+
+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (msr));
+}
+
+static inline unsigned long get_cr0(void)
+{
+ unsigned long v;
+ asm volatile("mov %%cr0,%0" : "=r" (v));
+ return v;
+}
+
+static inline void set_cr0(unsigned long v)
+{
+ asm volatile("mov %0,%%cr0" : : "r" (v));
+}
+
+static inline unsigned long get_cr4(void)
+{
+ unsigned long v;
+ asm volatile("mov %%cr4,%0" : "=r" (v));
+ return v;
+}
+
+static inline void set_cr4(unsigned long v)
+{
+ asm volatile("mov %0,%%cr4" : : "r" (v));
+}
+
+static inline uint32_t get_mxcsr(void)
+{
+ uint32_t v;
+ asm("stmxcsr %0" : "=m" (v));
+ return v;
+}
+
+static inline void set_mxcsr(uint32_t v)
+{
+ asm volatile("ldmxcsr %0" : : "m" (v));
+}
+
+static inline void fninit(void)
+{
+ asm volatile("fninit");
+}
+
+static inline void cpu_relax(void)
+{
+ asm volatile("rep ; nop");
+}
+
+static inline void hlt(void)
+{
+ asm volatile("hlt" : : : "memory");
+}
+
+static inline void cli(void)
+{
+ asm volatile("cli" : : : "memory");
+}
+
+static inline void sti(void)
+{
+ asm volatile("sti" : : : "memory");
+}
+
+static inline unsigned long get_eflags(void)
+{
+ unsigned long v;
+
+ asm volatile("pushf ; pop %0" : "=rm" (v));
+ return v;
+}
+
+static inline void set_eflags(unsigned long v)
+{
+ asm volatile("push %0 ; popf" : : "g" (v) : "memory");
+}
+
+typedef unsigned long irq_state_t;
+
+static inline irq_state_t irq_state(void)
+{
+ return get_eflags();
+}
+
+static inline irq_state_t irq_save(void)
+{
+ irq_state_t v = irq_state();
+ cli();
+ return v;
+}
+
+static inline void irq_restore(irq_state_t v)
+{
+ set_eflags(v);
+}
+
+#endif /* _X86_CPU_H */
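Illustrative usage sketch for cpu.h (not from the patch itself; print_vendor()
and its use of printf/memcpy are assumptions for the example). It probes for
CPUID via the EFLAGS.ID toggle test, then reads the vendor string from leaf 0,
bracketing the sequence with the irq_save()/irq_restore() pair:

#include <x86/cpu.h>
#include <x86/regs.h>
#include <string.h>
#include <stdio.h>

static void print_vendor(void)
{
    irq_state_t irq = irq_save();    /* save EFLAGS, then cli */

    if (cpu_has_eflag(EFLAGS_ID)) {  /* ID bit toggles => CPUID present */
        uint32_t eax, ebx, ecx, edx;
        char vendor[13];

        cpuid(0, &eax, &ebx, &ecx, &edx);
        memcpy(vendor + 0, &ebx, 4); /* vendor string order: EBX, EDX, ECX */
        memcpy(vendor + 4, &edx, 4);
        memcpy(vendor + 8, &ecx, 4);
        vendor[12] = '\0';
        printf("CPU vendor: %s\n", vendor);
    }

    irq_restore(irq);                /* restore EFLAGS (and IF) */
}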
diff --git a/com32/include/x86/regs.h b/com32/include/x86/regs.h
new file mode 100644
index 00000000..62f3a53d
--- /dev/null
+++ b/com32/include/x86/regs.h
@@ -0,0 +1,66 @@
+#ifndef _X86_REGS_H
+#define _X86_REGS_H
+
+/*
+ * Register definitions for x86 - should be assembly-safe
+ */
+
+/* EFLAGS definitions */
+#define EFLAGS_CF 0x00000001
+#define EFLAGS_PF 0x00000004
+#define EFLAGS_AF 0x00000010
+#define EFLAGS_ZF 0x00000040
+#define EFLAGS_SF 0x00000080
+#define EFLAGS_TF 0x00000100
+#define EFLAGS_IF 0x00000200
+#define EFLAGS_DF 0x00000400
+#define EFLAGS_OF 0x00000800
+#define EFLAGS_IOPL 0x00003000
+#define EFLAGS_NT 0x00004000
+#define EFLAGS_RF 0x00010000
+#define EFLAGS_VM 0x00020000
+#define EFLAGS_AC 0x00040000
+#define EFLAGS_VIF 0x00080000
+#define EFLAGS_VIP 0x00100000
+#define EFLAGS_ID 0x00200000
+
+/* CR0 definitions */
+#define CR0_PE 0x00000001
+#define CR0_MP 0x00000002
+#define CR0_EM 0x00000004
+#define CR0_TS 0x00000008
+#define CR0_ET 0x00000010
+#define CR0_NE 0x00000020
+#define CR0_WP 0x00010000
+#define CR0_AM 0x00040000
+#define CR0_NW 0x20000000
+#define CR0_CD 0x40000000
+#define CR0_PG 0x80000000
+
+/* CR4 definitions */
+#define CR4_VME 0x00000001
+#define CR4_PVI 0x00000002
+#define CR4_TSD 0x00000004
+#define CR4_DE 0x00000008
+#define CR4_PSE 0x00000010
+#define CR4_PAE 0x00000020
+#define CR4_MCE 0x00000040
+#define CR4_PGE 0x00000080
+#define CR4_PCE 0x00000100
+#define CR4_OSFXSR 0x00000200
+#define CR4_OSXMMEXCEPT 0x00000400
+#define CR4_UMIP 0x00000800
+#define CR4_VA57 0x00001000
+#define CR4_VMXE 0x00002000
+#define CR4_SMXE 0x00004000
+#define CR4_SEE 0x00008000
+#define CR4_FSGSBASE 0x00010000
+#define CR4_PCIDE 0x00020000
+#define CR4_OSXSAVE 0x00040000
+#define CR4_SMEP 0x00100000
+#define CR4_SMAP 0x00200000
+#define CR4_PKE 0x00400000
+#define CR4_CET 0x00800000
+
+#endif /* _X86_REGS_H */
+
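Illustrative usage sketch tying regs.h to the cpu.h accessors (not from the
patch itself; enable_sse() is a hypothetical helper). The masks are plain
integer constants, so they combine with the control-register accessors in the
usual read-modify-write pattern; this needs ring 0, which com32 code runs in:

#include <x86/cpu.h>
#include <x86/regs.h>

static void enable_sse(void)
{
    /* CR4: enable FXSAVE/FXRSTOR and unmasked SIMD FP exceptions */
    set_cr4(get_cr4() | CR4_OSFXSR | CR4_OSXMMEXCEPT);

    /* CR0: clear EM (no x87 emulation), set MP (monitor coprocessor) */
    set_cr0((get_cr0() & ~(unsigned long)CR0_EM) | CR0_MP);
}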