[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Minios-devel] [UNIKRAFT/LIBNNPACK PATCH 3/5] Add missing headers


  • To: "minios-devel@xxxxxxxxxxxxx" <minios-devel@xxxxxxxxxxxxx>
  • From: Vlad-Andrei BĂDOIU (78692) <vlad_andrei.badoiu@xxxxxxxxxxxxxxx>
  • Date: Wed, 16 Oct 2019 12:49:50 +0000
  • Accept-language: en-US
  • Arc-authentication-results: i=1; mx.microsoft.com 1; spf=pass smtp.mailfrom=stud.acs.upb.ro; dmarc=pass action=none header.from=stud.acs.upb.ro; dkim=pass header.d=stud.acs.upb.ro; arc=none
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector9901; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck; bh=m/KjJZechhUSFNew10ggufMXwXDUiA9H/+kNomJZh2Y=; b=CdZCeoD8WZ4MCpc4qfkNTRojIGbgGyXJXNVfJN99P0p+7NX8F2U/YOCGR8OH9+QVsmhg4ym4xiH8B7mdJFgyRTBBUMQEYa+1K7EkpOy1+/WlWdCtsvNhDlX5oylJiKp8GcjTkyNkhJV4ihfWqc/4vglfc/PmTFQanyLY48QkvgSQ12NmSIq6P5e4Pfv6lePMnVk2ogapeSzc/KJzT6ZcjnJG7LKY/VDddaQARVAA9g0SdBuPvZ9C4AZIoXvAtNtjIXVzq2nCruCtY1t/JcHwZn7NLn8UR5+bnaZW8Rq1CX0K3IYVBwZAbUXyVoM8I+XKkiln1tWckXEFcj9sIb9A9Q==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none; b=hZBykToUQhZh0x0x+cb/Dg013vfTlTtc/tuLzb8f9U4mjGVEgXJYLVcvHX82/hQ7G5TGsqc9twFC4u43LbPcmK+u/8PFlsJ9ilES2CpAM2sEepE1GagBadtfcaZ84OYpxZ0K5xUGjqx9TG5T43G8pzfNnkqui9nJoVQltpO8fJdYch6ofwvkbUtegi3GCM770CgQKg9WqN6Yp6X+a53kikZXzjC9E70GbNz6iGSUwXaEUms/isuZ9O9x9XK5t8WEq0aJG2Hqol6M+DkYbuwdIA9xTWPeL+bPZAzavzNnT7C+UfHj/qCVVFu+LHoiai8E8jeTPGeWzcQWdzH2MvX2Bg==
  • Authentication-results: spf=none (sender IP is ) smtp.mailfrom=vlad_andrei.badoiu@xxxxxxxxxxxxxxx;
  • Cc: "felipe.huici@xxxxxxxxx" <felipe.huici@xxxxxxxxx>, Vlad-Andrei BĂDOIU (78692) <vlad_andrei.badoiu@xxxxxxxxxxxxxxx>
  • Delivery-date: Wed, 16 Oct 2019 12:50:20 +0000
  • List-id: Mini-os development list <minios-devel.lists.xenproject.org>
  • Thread-index: AQHVhCAyGjSl75coPkaY1Hx7S5VsEA==
  • Thread-topic: [UNIKRAFT/LIBNNPACK PATCH 3/5] Add missing headers

The added headers are placeholders until they are properly implemented
in Unikraft.

Signed-off-by: Vlad-Andrei Badoiu <vlad_andrei.badoiu@xxxxxxxxxxxxxxx>
---
 include/cpuinfo.h     | 1810 +++++++++++++++++++++++++++++++++++++++++
 include/linux/futex.h |  217 +++++
 2 files changed, 2027 insertions(+)
 create mode 100644 include/cpuinfo.h
 create mode 100644 include/linux/futex.h

diff --git a/include/cpuinfo.h b/include/cpuinfo.h
new file mode 100644
index 0000000..218c292
--- /dev/null
+++ b/include/cpuinfo.h
@@ -0,0 +1,1810 @@
+/*
+ Copyright (c) 2017-2018 Facebook Inc.
+ Copyright (C) 2012-2017 Georgia Institute of Technology
+ Copyright (C) 2010-2012 Marat Dukhan
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#pragma once
+#ifndef CPUINFO_H
+#define CPUINFO_H
+
+#ifndef __cplusplus
+       #include <stdbool.h>
+#endif
+
+#ifdef __APPLE__
+       #include <TargetConditionals.h>
+#endif
+
+#include <stdint.h>
+
+/* Identify architecture and define corresponding macro */
+
+#if defined(__i386__) || defined(__i486__) || defined(__i586__) || 
defined(__i686__) || defined(_M_IX86)
+       #define CPUINFO_ARCH_X86 1
+#endif
+
+#if defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || 
defined(_M_AMD64)
+       #define CPUINFO_ARCH_X86_64 1
+#endif
+
+#if defined(__arm__) || defined(_M_ARM)
+       #define CPUINFO_ARCH_ARM 1
+#endif
+
+#if defined(__aarch64__) || defined(_M_ARM64)
+       #define CPUINFO_ARCH_ARM64 1
+#endif
+
+#if defined(__PPC64__) || defined(__powerpc64__) || defined(_ARCH_PPC64)
+       #define CPUINFO_ARCH_PPC64 1
+#endif
+
+#if defined(__pnacl__)
+       #define CPUINFO_ARCH_PNACL 1
+#endif
+
+#if defined(EMSCRIPTEN)
+       #define CPUINFO_ARCH_ASMJS 1
+#endif
+
+#if CPUINFO_ARCH_X86 && defined(_MSC_VER)
+       #define CPUINFO_ABI __cdecl
+#elif CPUINFO_ARCH_X86 && defined(__GNUC__)
+       #define CPUINFO_ABI __attribute__((__cdecl__))
+#else
+       #define CPUINFO_ABI
+#endif
+
+/* Define other architecture-specific macros as 0 */
+
+#ifndef CPUINFO_ARCH_X86
+       #define CPUINFO_ARCH_X86 0
+#endif
+
+#ifndef CPUINFO_ARCH_X86_64
+       #define CPUINFO_ARCH_X86_64 0
+#endif
+
+#ifndef CPUINFO_ARCH_ARM
+       #define CPUINFO_ARCH_ARM 0
+#endif
+
+#ifndef CPUINFO_ARCH_ARM64
+       #define CPUINFO_ARCH_ARM64 0
+#endif
+
+#ifndef CPUINFO_ARCH_PPC64
+       #define CPUINFO_ARCH_PPC64 0
+#endif
+
+#ifndef CPUINFO_ARCH_PNACL
+       #define CPUINFO_ARCH_PNACL 0
+#endif
+
+#ifndef CPUINFO_ARCH_ASMJS
+       #define CPUINFO_ARCH_ASMJS 0
+#endif
+
+#define CPUINFO_CACHE_UNIFIED          0x00000001
+#define CPUINFO_CACHE_INCLUSIVE        0x00000002
+#define CPUINFO_CACHE_COMPLEX_INDEXING 0x00000004
+
+struct cpuinfo_cache {
+       /** Cache size in bytes */
+       uint32_t size;
+       /** Number of ways of associativity */
+       uint32_t associativity;
+       /** Number of sets */
+       uint32_t sets;
+       /** Number of partitions */
+       uint32_t partitions;
+       /** Line size in bytes */
+       uint32_t line_size;
+       /**
+        * Binary characteristics of the cache (unified cache, inclusive cache, 
cache with complex indexing).
+        *
+        * @see CPUINFO_CACHE_UNIFIED, CPUINFO_CACHE_INCLUSIVE, 
CPUINFO_CACHE_COMPLEX_INDEXING
+        */
+       uint32_t flags;
+       /** Index of the first logical processor that shares this cache */
+       uint32_t processor_start;
+       /** Number of logical processors that share this cache */
+       uint32_t processor_count;
+};
+
+struct cpuinfo_trace_cache {
+       uint32_t uops;
+       uint32_t associativity;
+};
+
+#define CPUINFO_PAGE_SIZE_4KB  0x1000
+#define CPUINFO_PAGE_SIZE_1MB  0x100000
+#define CPUINFO_PAGE_SIZE_2MB  0x200000
+#define CPUINFO_PAGE_SIZE_4MB  0x400000
+#define CPUINFO_PAGE_SIZE_16MB 0x1000000
+#define CPUINFO_PAGE_SIZE_1GB  0x40000000
+
+struct cpuinfo_tlb {
+       uint32_t entries;
+       uint32_t associativity;
+       uint64_t pages;
+};
+
+/** Vendor of processor core design */
+enum cpuinfo_vendor {
+       /** Processor vendor is not known to the library, or the library failed 
to get vendor information from the OS. */
+       cpuinfo_vendor_unknown = 0,
+
+       /* Active vendors of modern CPUs */
+
+       /**
+        * Intel Corporation. Vendor of x86, x86-64, IA64, and ARM processor 
microarchitectures.
+        *
+        * Sold its ARM design subsidiary in 2006. The last ARM processor 
design was released in 2004.
+        */
+       cpuinfo_vendor_intel    = 1,
+       /** Advanced Micro Devices, Inc. Vendor of x86 and x86-64 processor 
microarchitectures. */
+       cpuinfo_vendor_amd      = 2,
+       /** ARM Holdings plc. Vendor of ARM and ARM64 processor 
microarchitectures. */
+       cpuinfo_vendor_arm      = 3,
+       /** Qualcomm Incorporated. Vendor of ARM and ARM64 processor 
microarchitectures. */
+       cpuinfo_vendor_qualcomm = 4,
+       /** Apple Inc. Vendor of ARM and ARM64 processor microarchitectures. */
+       cpuinfo_vendor_apple    = 5,
+       /** Samsung Electronics Co., Ltd. Vendor of ARM64 processor 
microarchitectures. */
+       cpuinfo_vendor_samsung  = 6,
+       /** Nvidia Corporation. Vendor of ARM64-compatible processor 
microarchitectures. */
+       cpuinfo_vendor_nvidia   = 7,
+       /** MIPS Technologies, Inc. Vendor of MIPS processor 
microarchitectures. */
+       cpuinfo_vendor_mips     = 8,
+       /** International Business Machines Corporation. Vendor of PowerPC 
processor microarchitectures. */
+       cpuinfo_vendor_ibm      = 9,
+       /** Ingenic Semiconductor. Vendor of MIPS processor microarchitectures. 
*/
+       cpuinfo_vendor_ingenic  = 10,
+       /**
+        * VIA Technologies, Inc. Vendor of x86 and x86-64 processor 
microarchitectures.
+        *
+        * Processors are designed by Centaur Technology, a subsidiary of VIA 
Technologies.
+        */
+       cpuinfo_vendor_via      = 11,
+       /** Cavium, Inc. Vendor of ARM64 processor microarchitectures. */
+       cpuinfo_vendor_cavium   = 12,
+       /** Broadcom, Inc. Vendor of ARM processor microarchitectures. */
+       cpuinfo_vendor_broadcom = 13,
+       /** Applied Micro Circuits Corporation (APM). Vendor of ARM64 processor 
microarchitectures. */
+       cpuinfo_vendor_apm      = 14,
+       /**
+        * Huawei Technologies Co., Ltd. Vendor of ARM64 processor 
microarchitectures.
+        *
+        * Processors are designed by HiSilicon, a subsidiary of Huawei.
+        */
+       cpuinfo_vendor_huawei   = 15,
+
+       /* Active vendors of embedded CPUs */
+
+       /** Texas Instruments Inc. Vendor of ARM processor microarchitectures. 
*/
+       cpuinfo_vendor_texas_instruments = 30,
+       /** Marvell Technology Group Ltd. Vendor of ARM processor 
microarchitectures. */
+       cpuinfo_vendor_marvell           = 31,
+       /** RDC Semiconductor Co., Ltd. Vendor of x86 processor 
microarchitectures. */
+       cpuinfo_vendor_rdc               = 32,
+       /** DM&P Electronics Inc. Vendor of x86 processor microarchitectures. */
+       cpuinfo_vendor_dmp               = 33,
+       /** Motorola, Inc. Vendor of PowerPC and ARM processor 
microarchitectures. */
+       cpuinfo_vendor_motorola          = 34,
+
+       /* Defunct CPU vendors */
+
+       /**
+        * Transmeta Corporation. Vendor of x86 processor microarchitectures.
+        *
+        * Now defunct. The last processor design was released in 2004.
+        * Transmeta processors implemented VLIW ISA and used binary 
translation to execute x86 code.
+        */
+       cpuinfo_vendor_transmeta = 50,
+       /**
+        * Cyrix Corporation. Vendor of x86 processor microarchitectures.
+        *
+        * Now defunct. The last processor design was released in 1996.
+        */
+       cpuinfo_vendor_cyrix     = 51,
+       /**
+        * Rise Technology. Vendor of x86 processor microarchitectures.
+        *
+        * Now defunct. The last processor design was released in 1999.
+        */
+       cpuinfo_vendor_rise      = 52,
+       /**
+        * National Semiconductor. Vendor of x86 processor microarchitectures.
+        *
+        * Sold its x86 design subsidiary in 1999. The last processor design 
was released in 1998.
+        */
+       cpuinfo_vendor_nsc       = 53,
+       /**
+        * Silicon Integrated Systems. Vendor of x86 processor 
microarchitectures.
+        *
+        * Sold its x86 design subsidiary in 2001. The last processor design 
was released in 2001.
+        */
+       cpuinfo_vendor_sis       = 54,
+       /**
+        * NexGen. Vendor of x86 processor microarchitectures.
+        *
+        * Now defunct. The last processor design was released in 1994.
+        * NexGen designed the first x86 microarchitecture which decomposed x86 
instructions into simple microoperations.
+        */
+       cpuinfo_vendor_nexgen    = 55,
+       /**
+        * United Microelectronics Corporation. Vendor of x86 processor 
microarchitectures.
+        *
+        * Ceased x86 in the early 1990s. The last processor design was 
released in 1991.
+        * Designed U5C and U5D processors. Both are 486 level.
+        */
+       cpuinfo_vendor_umc       = 56,
+       /**
+        * Digital Equipment Corporation. Vendor of ARM processor 
microarchitecture.
+        *
+        * Sold its ARM designs in 1997. The last processor design was released 
in 1997.
+        */
+       cpuinfo_vendor_dec       = 57,
+};
+
+/**
+ * Processor microarchitecture
+ *
+ * Processors with different microarchitectures often have different 
instruction performance characteristics,
+ * and may have dramatically different pipeline organization.
+ */
+enum cpuinfo_uarch {
+       /** Microarchitecture is unknown, or the library failed to get 
information about the microarchitecture from OS */
+       cpuinfo_uarch_unknown = 0,
+
+       /** Pentium and Pentium MMX microarchitecture. */
+       cpuinfo_uarch_p5    = 0x00100100,
+       /** Intel Quark microarchitecture. */
+       cpuinfo_uarch_quark = 0x00100101,
+
+       /** Pentium Pro, Pentium II, and Pentium III. */
+       cpuinfo_uarch_p6           = 0x00100200,
+       /** Pentium M. */
+       cpuinfo_uarch_dothan       = 0x00100201,
+       /** Intel Core microarchitecture. */
+       cpuinfo_uarch_yonah        = 0x00100202,
+       /** Intel Core 2 microarchitecture on 65 nm process. */
+       cpuinfo_uarch_conroe       = 0x00100203,
+       /** Intel Core 2 microarchitecture on 45 nm process. */
+       cpuinfo_uarch_penryn       = 0x00100204,
+       /** Intel Nehalem and Westmere microarchitectures (Core i3/i5/i7 1st 
gen). */
+       cpuinfo_uarch_nehalem      = 0x00100205,
+       /** Intel Sandy Bridge microarchitecture (Core i3/i5/i7 2nd gen). */
+       cpuinfo_uarch_sandy_bridge = 0x00100206,
+       /** Intel Ivy Bridge microarchitecture (Core i3/i5/i7 3rd gen). */
+       cpuinfo_uarch_ivy_bridge   = 0x00100207,
+       /** Intel Haswell microarchitecture (Core i3/i5/i7 4th gen). */
+       cpuinfo_uarch_haswell      = 0x00100208,
+       /** Intel Broadwell microarchitecture. */
+       cpuinfo_uarch_broadwell    = 0x00100209,
+       /** Intel Sky Lake microarchitecture. */
+       cpuinfo_uarch_sky_lake     = 0x0010020A,
+       /** Intel Kaby Lake microarchitecture. */
+       cpuinfo_uarch_kaby_lake    = 0x0010020B,
+
+       /** Pentium 4 with Willamette, Northwood, or Foster cores. */
+       cpuinfo_uarch_willamette = 0x00100300,
+       /** Pentium 4 with Prescott and later cores. */
+       cpuinfo_uarch_prescott   = 0x00100301,
+
+       /** Intel Atom on 45 nm process. */
+       cpuinfo_uarch_bonnell    = 0x00100400,
+       /** Intel Atom on 32 nm process. */
+       cpuinfo_uarch_saltwell   = 0x00100401,
+       /** Intel Silvermont microarchitecture (22 nm out-of-order Atom). */
+       cpuinfo_uarch_silvermont = 0x00100402,
+       /** Intel Airmont microarchitecture (14 nm out-of-order Atom). */
+       cpuinfo_uarch_airmont    = 0x00100403,
+
+       /** Intel Knights Ferry HPC boards. */
+       cpuinfo_uarch_knights_ferry   = 0x00100500,
+       /** Intel Knights Corner HPC boards (aka Xeon Phi). */
+       cpuinfo_uarch_knights_corner  = 0x00100501,
+       /** Intel Knights Landing microarchitecture (second-gen MIC). */
+       cpuinfo_uarch_knights_landing = 0x00100502,
+       /** Intel Knights Hill microarchitecture (third-gen MIC). */
+       cpuinfo_uarch_knights_hill    = 0x00100503,
+       /** Intel Knights Mill Xeon Phi. */
+       cpuinfo_uarch_knights_mill    = 0x00100504,
+
+       /** Intel/Marvell XScale series. */
+       cpuinfo_uarch_xscale = 0x00100600,
+
+       /** AMD K5. */
+       cpuinfo_uarch_k5        = 0x00200100,
+       /** AMD K6 and alike. */
+       cpuinfo_uarch_k6        = 0x00200101,
+       /** AMD Athlon and Duron. */
+       cpuinfo_uarch_k7        = 0x00200102,
+       /** AMD Athlon 64, Opteron 64. */
+       cpuinfo_uarch_k8        = 0x00200103,
+       /** AMD Family 10h (Barcelona, Istanbul, Magny-Cours). */
+       cpuinfo_uarch_k10       = 0x00200104,
+       /**
+        * AMD Bulldozer microarchitecture
+        * Zambezi FX-series CPUs, Zurich, Valencia and Interlagos Opteron CPUs.
+        */
+       cpuinfo_uarch_bulldozer = 0x00200105,
+       /**
+        * AMD Piledriver microarchitecture
+        * Vishera FX-series CPUs, Trinity and Richland APUs, Delhi, Seoul, Abu 
Dhabi Opteron CPUs.
+        */
+       cpuinfo_uarch_piledriver  = 0x00200106,
+       /** AMD Steamroller microarchitecture (Kaveri APUs). */
+       cpuinfo_uarch_steamroller = 0x00200107,
+       /** AMD Excavator microarchitecture (Carrizo APUs). */
+       cpuinfo_uarch_excavator   = 0x00200108,
+       /** AMD Zen microarchitecture (Ryzen CPUs). */
+       cpuinfo_uarch_zen         = 0x00200109,
+
+       /** NSC Geode and AMD Geode GX and LX. */
+       cpuinfo_uarch_geode  = 0x00200200,
+       /** AMD Bobcat mobile microarchitecture. */
+       cpuinfo_uarch_bobcat = 0x00200201,
+       /** AMD Jaguar mobile microarchitecture. */
+       cpuinfo_uarch_jaguar = 0x00200202,
+       /** AMD Puma mobile microarchitecture. */
+       cpuinfo_uarch_puma   = 0x00200203,
+
+       /** ARM7 series. */
+       cpuinfo_uarch_arm7  = 0x00300100,
+       /** ARM9 series. */
+       cpuinfo_uarch_arm9  = 0x00300101,
+       /** ARM 1136, ARM 1156, ARM 1176, or ARM 11MPCore. */
+       cpuinfo_uarch_arm11 = 0x00300102,
+
+       /** ARM Cortex-A5. */
+       cpuinfo_uarch_cortex_a5  = 0x00300205,
+       /** ARM Cortex-A7. */
+       cpuinfo_uarch_cortex_a7  = 0x00300207,
+       /** ARM Cortex-A8. */
+       cpuinfo_uarch_cortex_a8  = 0x00300208,
+       /** ARM Cortex-A9. */
+       cpuinfo_uarch_cortex_a9  = 0x00300209,
+       /** ARM Cortex-A12. */
+       cpuinfo_uarch_cortex_a12 = 0x00300212,
+       /** ARM Cortex-A15. */
+       cpuinfo_uarch_cortex_a15 = 0x00300215,
+       /** ARM Cortex-A17. */
+       cpuinfo_uarch_cortex_a17 = 0x00300217,
+
+       /** ARM Cortex-A32. */
+       cpuinfo_uarch_cortex_a32 = 0x00300332,
+       /** ARM Cortex-A35. */
+       cpuinfo_uarch_cortex_a35 = 0x00300335,
+       /** ARM Cortex-A53. */
+       cpuinfo_uarch_cortex_a53 = 0x00300353,
+       /** ARM Cortex-A55. */
+       cpuinfo_uarch_cortex_a55 = 0x00300355,
+       /** ARM Cortex-A57. */
+       cpuinfo_uarch_cortex_a57 = 0x00300357,
+       /** ARM Cortex-A72. */
+       cpuinfo_uarch_cortex_a72 = 0x00300372,
+       /** ARM Cortex-A73. */
+       cpuinfo_uarch_cortex_a73 = 0x00300373,
+       /** ARM Cortex-A75. */
+       cpuinfo_uarch_cortex_a75 = 0x00300375,
+       /** ARM Cortex-A76. */
+       cpuinfo_uarch_cortex_a76 = 0x00300376,
+
+       /** Qualcomm Scorpion. */
+       cpuinfo_uarch_scorpion = 0x00400100,
+       /** Qualcomm Krait. */
+       cpuinfo_uarch_krait    = 0x00400101,
+       /** Qualcomm Kryo. */
+       cpuinfo_uarch_kryo     = 0x00400102,
+       /** Qualcomm Falkor. */
+       cpuinfo_uarch_falkor   = 0x00400103,
+       /** Qualcomm Saphira. */
+       cpuinfo_uarch_saphira  = 0x00400104,
+
+       /** Nvidia Denver. */
+       cpuinfo_uarch_denver   = 0x00500100,
+       /** Nvidia Denver 2. */
+       cpuinfo_uarch_denver2  = 0x00500101,
+       /** Nvidia Carmel. */
+       cpuinfo_uarch_carmel   = 0x00500102,
+
+       /** Samsung Mongoose M1 (Exynos 8890 big cores). */
+       cpuinfo_uarch_mongoose_m1 = 0x00600100,
+       /** Samsung Mongoose M2 (Exynos 8895 big cores). */
+       cpuinfo_uarch_mongoose_m2 = 0x00600101,
+       /** Samsung Meerkat M3 (Exynos 9810 big cores). */
+       cpuinfo_uarch_meerkat_m3  = 0x00600102,
+
+       /** Apple A6 and A6X processors. */
+       cpuinfo_uarch_swift     = 0x00700100,
+       /** Apple A7 processor. */
+       cpuinfo_uarch_cyclone   = 0x00700101,
+       /** Apple A8 and A8X processor. */
+       cpuinfo_uarch_typhoon   = 0x00700102,
+       /** Apple A9 and A9X processor. */
+       cpuinfo_uarch_twister   = 0x00700103,
+       /** Apple A10 and A10X processor. */
+       cpuinfo_uarch_hurricane = 0x00700104,
+       /** Apple A11 processor (big cores). */
+       cpuinfo_uarch_monsoon   = 0x00700105,
+       /** Apple A11 processor (little cores). */
+       cpuinfo_uarch_mistral   = 0x00700106,
+
+       /** Cavium ThunderX. */
+       cpuinfo_uarch_thunderx = 0x00800100,
+       /** Cavium ThunderX2 (originally Broadcom Vulcan). */
+       cpuinfo_uarch_thunderx2 = 0x00800200,
+
+       /** Marvell PJ4. */
+       cpuinfo_uarch_pj4 = 0x00900100,
+
+       /** Broadcom Brahma B15. */
+       cpuinfo_uarch_brahma_b15 = 0x00A00100,
+       /** Broadcom Brahma B53. */
+       cpuinfo_uarch_brahma_b53 = 0x00A00101,
+
+       /** Applied Micro X-Gene. */
+       cpuinfo_uarch_xgene = 0x00B00100,
+};
+
+struct cpuinfo_processor {
+       /** SMT (hyperthread) ID within a core */
+       uint32_t smt_id;
+       /** Core containing this logical processor */
+       const struct cpuinfo_core* core;
+       /** Cluster of cores containing this logical processor */
+       const struct cpuinfo_cluster* cluster;
+       /** Physical package containing this logical processor */
+       const struct cpuinfo_package* package;
+#if defined(__linux__)
+       /**
+        * Linux-specific ID for the logical processor:
+        * - Linux kernel exposes information about this logical processor in 
/sys/devices/system/cpu/cpu<linux_id>/
+        * - Bit <linux_id> in the cpu_set_t identifies this logical processor
+        */
+       int linux_id;
+#endif
+#if defined(_WIN32)
+       /** Windows-specific ID for the group containing the logical processor. 
*/
+       uint16_t windows_group_id;
+       /**
+        * Windows-specific ID of the logical processor within its group:
+        * - Bit <windows_processor_id> in the KAFFINITY mask identifies this 
logical processor within its group.
+        */
+       uint16_t windows_processor_id;
+#endif
+#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+       /** APIC ID (unique x86-specific ID of the logical processor) */
+       uint32_t apic_id;
+#endif
+       struct {
+               /** Level 1 instruction cache */
+               const struct cpuinfo_cache* l1i;
+               /** Level 1 data cache */
+               const struct cpuinfo_cache* l1d;
+               /** Level 2 unified or data cache */
+               const struct cpuinfo_cache* l2;
+               /** Level 3 unified or data cache */
+               const struct cpuinfo_cache* l3;
+               /** Level 4 unified or data cache */
+               const struct cpuinfo_cache* l4;
+       } cache;
+};
+
+struct cpuinfo_core {
+       /** Index of the first logical processor on this core. */
+       uint32_t processor_start;
+       /** Number of logical processors on this core */
+       uint32_t processor_count;
+       /** Core ID within a package */
+       uint32_t core_id;
+       /** Cluster containing this core */
+       const struct cpuinfo_cluster* cluster;
+       /** Physical package containing this core. */
+       const struct cpuinfo_package* package;
+       /** Vendor of the CPU microarchitecture for this core */
+       enum cpuinfo_vendor vendor;
+       /** CPU microarchitecture for this core */
+       enum cpuinfo_uarch uarch;
+#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+       /** Value of CPUID leaf 1 EAX register for this core */
+       uint32_t cpuid;
+#elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+       /** Value of Main ID Register (MIDR) for this core */
+       uint32_t midr;
+#endif
+       /** Clock rate (non-Turbo) of the core, in Hz */
+       uint64_t frequency;
+};
+
+struct cpuinfo_cluster {
+       /** Index of the first logical processor in the cluster */
+       uint32_t processor_start;
+       /** Number of logical processors in the cluster */
+       uint32_t processor_count;
+       /** Index of the first core in the cluster */
+       uint32_t core_start;
+       /** Number of cores on the cluster */
+       uint32_t core_count;
+       /** Cluster ID within a package */
+       uint32_t cluster_id;
+       /** Physical package containing the cluster */
+       const struct cpuinfo_package* package;
+       /** CPU microarchitecture vendor of the cores in the cluster */
+       enum cpuinfo_vendor vendor;
+       /** CPU microarchitecture of the cores in the cluster */
+       enum cpuinfo_uarch uarch;
+#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+       /** Value of CPUID leaf 1 EAX register of the cores in the cluster */
+       uint32_t cpuid;
+#elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+       /** Value of Main ID Register (MIDR) of the cores in the cluster */
+       uint32_t midr;
+#endif
+       /** Clock rate (non-Turbo) of the cores in the cluster, in Hz */
+       uint64_t frequency;
+};
+
+#define CPUINFO_PACKAGE_NAME_MAX 48
+
+struct cpuinfo_package {
+       /** SoC or processor chip model name */
+       char name[CPUINFO_PACKAGE_NAME_MAX];
+       /** Index of the first logical processor on this physical package */
+       uint32_t processor_start;
+       /** Number of logical processors on this physical package */
+       uint32_t processor_count;
+       /** Index of the first core on this physical package */
+       uint32_t core_start;
+       /** Number of cores on this physical package */
+       uint32_t core_count;
+       /** Index of the first cluster of cores on this physical package */
+       uint32_t cluster_start;
+       /** Number of clusters of cores on this physical package */
+       uint32_t cluster_count;
+};
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Placeholder: no CPU detection is performed in Unikraft; always succeeds.
+ * Must be static inline: a non-static definition in a header causes
+ * multiple-definition link errors when included from several TUs. */
+static inline bool CPUINFO_ABI cpuinfo_initialize(void) { return true; }
+
+/* Placeholder: nothing to release (cpuinfo_initialize allocates nothing).
+ * static inline for the same header-safety reason as cpuinfo_initialize. */
+static inline void CPUINFO_ABI cpuinfo_deinitialize(void)
+{
+}
+
+#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+       /* This structure is not a part of stable API. Use cpuinfo_has_x86_* 
functions instead. */
+       struct cpuinfo_x86_isa {
+               #if CPUINFO_ARCH_X86
+                       bool rdtsc;
+               #endif
+               bool rdtscp;
+               bool rdpid;
+               bool sysenter;
+               #if CPUINFO_ARCH_X86
+                       bool syscall;
+               #endif
+               bool msr;
+               bool clzero;
+               bool clflush;
+               bool clflushopt;
+               bool mwait;
+               bool mwaitx;
+               #if CPUINFO_ARCH_X86
+                       bool emmx;
+               #endif
+               bool fxsave;
+               bool xsave;
+               #if CPUINFO_ARCH_X86
+                       bool fpu;
+                       bool mmx;
+                       bool mmx_plus;
+               #endif
+               bool three_d_now;
+               bool three_d_now_plus;
+               #if CPUINFO_ARCH_X86
+                       bool three_d_now_geode;
+               #endif
+               bool prefetch;
+               bool prefetchw;
+               bool prefetchwt1;
+               #if CPUINFO_ARCH_X86
+                       bool daz;
+                       bool sse;
+                       bool sse2;
+               #endif
+               bool sse3;
+               bool ssse3;
+               bool sse4_1;
+               bool sse4_2;
+               bool sse4a;
+               bool misaligned_sse;
+               bool avx;
+               bool fma3;
+               bool fma4;
+               bool xop;
+               bool f16c;
+               bool avx2;
+               bool avx512f;
+               bool avx512pf;
+               bool avx512er;
+               bool avx512cd;
+               bool avx512dq;
+               bool avx512bw;
+               bool avx512vl;
+               bool avx512ifma;
+               bool avx512vbmi;
+               bool avx512vbmi2;
+               bool avx512bitalg;
+               bool avx512vpopcntdq;
+               bool avx512vnni;
+               bool avx512_4vnniw;
+               bool avx512_4fmaps;
+               bool hle;
+               bool rtm;
+               bool xtest;
+               bool mpx;
+               #if CPUINFO_ARCH_X86
+                       bool cmov;
+                       bool cmpxchg8b;
+               #endif
+               bool cmpxchg16b;
+               bool clwb;
+               bool movbe;
+               #if CPUINFO_ARCH_X86_64
+                       bool lahf_sahf;
+               #endif
+               bool fs_gs_base;
+               bool lzcnt;
+               bool popcnt;
+               bool tbm;
+               bool bmi;
+               bool bmi2;
+               bool adx;
+               bool aes;
+               bool vaes;
+               bool pclmulqdq;
+               bool vpclmulqdq;
+               bool gfni;
+               bool rdrand;
+               bool rdseed;
+               bool sha;
+               bool rng;
+               bool ace;
+               bool ace2;
+               bool phe;
+               bool pmm;
+               bool lwp;
+       };
+
+       extern struct cpuinfo_x86_isa cpuinfo_isa;
+#endif
+
+static inline bool cpuinfo_has_x86_rdtsc(void) {
+       #if CPUINFO_ARCH_X86_64
+               return true;
+       #elif CPUINFO_ARCH_X86
+               #if defined(__ANDROID__)
+                       return true;
+               #else
+                       return cpuinfo_isa.rdtsc;
+               #endif
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_rdtscp(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.rdtscp;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_rdpid(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.rdpid;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_clzero(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.clzero;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_mwait(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.mwait;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_mwaitx(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.mwaitx;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_fxsave(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.fxsave;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_xsave(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.xsave;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_fpu(void) {
+       #if CPUINFO_ARCH_X86_64
+               return true;
+       #elif CPUINFO_ARCH_X86
+               #if defined(__ANDROID__)
+                       return true;
+               #else
+                       return cpuinfo_isa.fpu;
+               #endif
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_mmx(void) {
+       #if CPUINFO_ARCH_X86_64
+               return true;
+       #elif CPUINFO_ARCH_X86
+               #if defined(__ANDROID__)
+                       return true;
+               #else
+                       return cpuinfo_isa.mmx;
+               #endif
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_mmx_plus(void) {
+       #if CPUINFO_ARCH_X86_64
+               return true;
+       #elif CPUINFO_ARCH_X86
+               #if defined(__ANDROID__)
+                       return true;
+               #else
+                       return cpuinfo_isa.mmx_plus;
+               #endif
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_3dnow(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.three_d_now;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_3dnow_plus(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.three_d_now_plus;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_3dnow_geode(void) {
+       #if CPUINFO_ARCH_X86_64
+               return false;
+       #elif CPUINFO_ARCH_X86
+               #if defined(__ANDROID__)
+                       return false;
+               #else
+                       return cpuinfo_isa.three_d_now_geode;
+               #endif
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_prefetch(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.prefetch;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_prefetchw(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.prefetchw;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_prefetchwt1(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.prefetchwt1;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_daz(void) {
+       #if CPUINFO_ARCH_X86_64
+               return true;
+       #elif CPUINFO_ARCH_X86
+               #if defined(__ANDROID__)
+                       return true;
+               #else
+                       return cpuinfo_isa.daz;
+               #endif
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_sse(void) {
+       #if CPUINFO_ARCH_X86_64
+               return true;
+       #elif CPUINFO_ARCH_X86
+               #if defined(__ANDROID__)
+                       return true;
+               #else
+                       return cpuinfo_isa.sse;
+               #endif
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_sse2(void) {
+       #if CPUINFO_ARCH_X86_64
+               return true;
+       #elif CPUINFO_ARCH_X86
+               #if defined(__ANDROID__)
+                       return true;
+               #else
+                       return cpuinfo_isa.sse2;
+               #endif
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_sse3(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               #if defined(__ANDROID__)
+                       return true;
+               #else
+                       return cpuinfo_isa.sse3;
+               #endif
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_ssse3(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               #if defined(__ANDROID__)
+                       return true;
+               #else
+                       return cpuinfo_isa.ssse3;
+               #endif
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_sse4_1(void) {
+       #if CPUINFO_ARCH_X86_64
+               #if defined(__ANDROID__)
+                       return true;
+               #else
+                       return cpuinfo_isa.sse4_1;
+               #endif
+       #elif CPUINFO_ARCH_X86
+               return cpuinfo_isa.sse4_1;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_sse4_2(void) {
+       #if CPUINFO_ARCH_X86_64
+               #if defined(__ANDROID__)
+                       return true;
+               #else
+                       return cpuinfo_isa.sse4_2;
+               #endif
+       #elif CPUINFO_ARCH_X86
+               return cpuinfo_isa.sse4_2;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_sse4a(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.sse4a;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_misaligned_sse(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.misaligned_sse;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_avx(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               /* AVX reporting is hard-wired to false: the real ISA query is
+                * left commented out below.  NOTE(review): presumably AVX is
+                * deliberately disabled for this (Unikraft) port -- confirm,
+                * and consider a #ifdef instead of a commented-out line. */
+               //return cpuinfo_isa.avx;
+        return false;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_fma3(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               /* FMA3 reporting is hard-wired to false; the real ISA query is
+                * commented out.  NOTE(review): presumably disabled on purpose
+                * together with AVX for this port -- confirm. */
+               //return cpuinfo_isa.fma3;
+        return false;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_fma4(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.fma4;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_xop(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.xop;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_f16c(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.f16c;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_avx2(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               /* AVX2 reporting is hard-wired to false; the real ISA query is
+                * commented out below.  NOTE(review): presumably disabled on
+                * purpose together with AVX/FMA3 -- confirm. */
+        return false;
+               //return cpuinfo_isa.avx2;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_avx512f(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.avx512f;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_avx512pf(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.avx512pf;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_avx512er(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.avx512er;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_avx512cd(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.avx512cd;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_avx512dq(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.avx512dq;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_avx512bw(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.avx512bw;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_avx512vl(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.avx512vl;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_avx512ifma(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.avx512ifma;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_avx512vbmi(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.avx512vbmi;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_avx512vbmi2(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.avx512vbmi2;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_avx512bitalg(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.avx512bitalg;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_avx512vpopcntdq(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.avx512vpopcntdq;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_avx512vnni(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.avx512vnni;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_avx512_4vnniw(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.avx512_4vnniw;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_avx512_4fmaps(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.avx512_4fmaps;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_hle(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.hle;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_rtm(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.rtm;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_xtest(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.xtest;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_mpx(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.mpx;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_cmov(void) {
+       #if CPUINFO_ARCH_X86_64
+               return true;
+       #elif CPUINFO_ARCH_X86
+               return cpuinfo_isa.cmov;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_cmpxchg8b(void) {
+       #if CPUINFO_ARCH_X86_64
+               return true;
+       #elif CPUINFO_ARCH_X86
+               return cpuinfo_isa.cmpxchg8b;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_cmpxchg16b(void) {
+       #if CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.cmpxchg16b;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_clwb(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.clwb;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_movbe(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.movbe;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_lahf_sahf(void) {
+       #if CPUINFO_ARCH_X86
+               return true;
+       #elif CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.lahf_sahf;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_lzcnt(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.lzcnt;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_popcnt(void) {
+       #if CPUINFO_ARCH_X86_64
+               #if defined(__ANDROID__)
+                       return true;
+               #else
+                       return cpuinfo_isa.popcnt;
+               #endif
+       #elif CPUINFO_ARCH_X86
+               return cpuinfo_isa.popcnt;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_tbm(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.tbm;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_bmi(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.bmi;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_bmi2(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.bmi2;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_adx(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.adx;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_aes(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.aes;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_vaes(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.vaes;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_pclmulqdq(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.pclmulqdq;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_vpclmulqdq(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.vpclmulqdq;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_gfni(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.gfni;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_rdrand(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.rdrand;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_rdseed(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.rdseed;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_x86_sha(void) {
+       #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+               return cpuinfo_isa.sha;
+       #else
+               return false;
+       #endif
+}
+
+#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+       /* This structure is not a part of stable API. Use cpuinfo_has_arm_* 
functions instead. */
+       struct cpuinfo_arm_isa {
+               #if CPUINFO_ARCH_ARM
+                       bool thumb;
+                       bool thumb2;
+                       bool thumbee;
+                       bool jazelle;
+                       bool armv5e;
+                       bool armv6;
+                       bool armv6k;
+                       bool armv7;
+                       bool armv7mp;
+                       bool idiv;
+
+                       bool vfpv2;
+                       bool vfpv3;
+                       bool d32;
+                       bool fp16;
+                       bool fma;
+
+                       bool wmmx;
+                       bool wmmx2;
+                       bool neon;
+               #endif
+               #if CPUINFO_ARCH_ARM64
+                       bool atomics;
+               #endif
+               bool rdm;
+               bool fp16arith;
+               bool dot;
+               bool jscvt;
+               bool fcma;
+
+               bool aes;
+               bool sha1;
+               bool sha2;
+               bool pmull;
+               bool crc32;
+       };
+
+       extern struct cpuinfo_arm_isa cpuinfo_isa;
+#endif
+
+static inline bool cpuinfo_has_arm_thumb(void) {
+       #if CPUINFO_ARCH_ARM
+               return cpuinfo_isa.thumb;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_thumb2(void) {
+       #if CPUINFO_ARCH_ARM
+               return cpuinfo_isa.thumb2;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_v5e(void) {
+       #if CPUINFO_ARCH_ARM
+               return cpuinfo_isa.armv5e;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_v6(void) {
+       #if CPUINFO_ARCH_ARM
+               return cpuinfo_isa.armv6;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_v6k(void) {
+       #if CPUINFO_ARCH_ARM
+               return cpuinfo_isa.armv6k;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_v7(void) {
+       #if CPUINFO_ARCH_ARM
+               return cpuinfo_isa.armv7;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_v7mp(void) {
+       #if CPUINFO_ARCH_ARM
+               return cpuinfo_isa.armv7mp;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_idiv(void) {
+       #if CPUINFO_ARCH_ARM64
+               return true;
+       #elif CPUINFO_ARCH_ARM
+               return cpuinfo_isa.idiv;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_vfpv2(void) {
+       #if CPUINFO_ARCH_ARM
+               return cpuinfo_isa.vfpv2;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_vfpv3(void) {
+       #if CPUINFO_ARCH_ARM64
+               return true;
+       #elif CPUINFO_ARCH_ARM
+               return cpuinfo_isa.vfpv3;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_vfpv3_d32(void) {
+       #if CPUINFO_ARCH_ARM64
+               return true;
+       #elif CPUINFO_ARCH_ARM
+               return cpuinfo_isa.vfpv3 && cpuinfo_isa.d32;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_vfpv3_fp16(void) {
+       #if CPUINFO_ARCH_ARM64
+               return true;
+       #elif CPUINFO_ARCH_ARM
+               return cpuinfo_isa.vfpv3 && cpuinfo_isa.fp16;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_vfpv3_fp16_d32(void) {
+       #if CPUINFO_ARCH_ARM64
+               return true;
+       #elif CPUINFO_ARCH_ARM
+               return cpuinfo_isa.vfpv3 && cpuinfo_isa.fp16 && cpuinfo_isa.d32;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_vfpv4(void) {
+       #if CPUINFO_ARCH_ARM64
+               return true;
+       #elif CPUINFO_ARCH_ARM
+               return cpuinfo_isa.vfpv3 && cpuinfo_isa.fma;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_vfpv4_d32(void) {
+       #if CPUINFO_ARCH_ARM64
+               return true;
+       #elif CPUINFO_ARCH_ARM
+               return cpuinfo_isa.vfpv3 && cpuinfo_isa.fma && cpuinfo_isa.d32;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_wmmx(void) {
+       #if CPUINFO_ARCH_ARM
+               return cpuinfo_isa.wmmx;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_wmmx2(void) {
+       #if CPUINFO_ARCH_ARM
+               return cpuinfo_isa.wmmx2;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_neon(void) {
+       #if CPUINFO_ARCH_ARM64
+               return true;
+       #elif CPUINFO_ARCH_ARM
+               return cpuinfo_isa.neon;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_neon_fp16(void) {
+       #if CPUINFO_ARCH_ARM64
+               return true;
+       #elif CPUINFO_ARCH_ARM
+               return cpuinfo_isa.neon && cpuinfo_isa.fp16;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_neon_fma(void) {
+       #if CPUINFO_ARCH_ARM64
+               return true;
+       #elif CPUINFO_ARCH_ARM
+               return cpuinfo_isa.neon && cpuinfo_isa.fma;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_atomics(void) {
+       #if CPUINFO_ARCH_ARM64
+               return cpuinfo_isa.atomics;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_neon_rdm(void) {
+       #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+               return cpuinfo_isa.rdm;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_neon_fp16_arith(void) {
+       #if CPUINFO_ARCH_ARM
+               return cpuinfo_isa.neon && cpuinfo_isa.fp16arith;
+       #elif CPUINFO_ARCH_ARM64
+               return cpuinfo_isa.fp16arith;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_fp16_arith(void) {
+       #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+               return cpuinfo_isa.fp16arith;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_neon_dot(void) {
+       #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+               return cpuinfo_isa.dot;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_jscvt(void) {
+       #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+               return cpuinfo_isa.jscvt;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_fcma(void) {
+       #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+               return cpuinfo_isa.fcma;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_aes(void) {
+       #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+               return cpuinfo_isa.aes;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_sha1(void) {
+       #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+               return cpuinfo_isa.sha1;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_sha2(void) {
+       #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+               return cpuinfo_isa.sha2;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_pmull(void) {
+       #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+               return cpuinfo_isa.pmull;
+       #else
+               return false;
+       #endif
+}
+
+static inline bool cpuinfo_has_arm_crc32(void) {
+       #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+               return cpuinfo_isa.crc32;
+       #else
+               return false;
+       #endif
+}
+
+const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_processors(void);
+const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_cores(void);
+const struct cpuinfo_cluster* CPUINFO_ABI cpuinfo_get_clusters(void);
+const struct cpuinfo_package* CPUINFO_ABI cpuinfo_get_packages(void);
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_caches(void);
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_caches(void);
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_caches(void)
+{
+       /* Stub: returns one fixed L2 cache description instead of probing
+        * hardware.  NOTE(review): a non-static function definition in a
+        * header causes multiple-definition link errors if this header is
+        * included from more than one translation unit -- confirm. */
+       static struct cpuinfo_cache cc;
+       cc.size = 64 * 512 * 12;        /* line_size * sets * associativity */
+       cc.associativity = 12;
+       cc.sets = 512;          /* fix: second write previously clobbered .associativity */
+       cc.partitions = 64;     /* NOTE(review): cpuinfo normally uses 1 here -- confirm */
+       cc.line_size = 64;
+       cc.flags = CPUINFO_CACHE_INCLUSIVE;
+       cc.processor_start = 0;
+       cc.processor_count = 1;
+       return &cc;
+}
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_caches(void)
+{
+       /* Stub: returns one fixed L3 cache description instead of probing
+        * hardware.  NOTE(review): non-static definition in a header risks
+        * multiple-definition link errors -- confirm single inclusion. */
+       static struct cpuinfo_cache cc;
+       cc.size = 64 * 512 * 12;        /* line_size * sets * associativity */
+       cc.associativity = 12;
+       cc.sets = 512;          /* fix: second write previously clobbered .associativity */
+       cc.partitions = 64;     /* NOTE(review): cpuinfo normally uses 1 here -- confirm */
+       cc.line_size = 64;
+       cc.flags = CPUINFO_CACHE_INCLUSIVE;
+       cc.processor_start = 0;
+       cc.processor_count = 1;
+       return &cc;
+}
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_caches(void)
+{
+       /* Stub: returns one fixed L4 cache description.  NOTE(review): same
+        * fixed geometry is reported for L2/L3/L4 -- presumably placeholder
+        * values; confirm.  Non-static definition in a header risks
+        * multiple-definition link errors. */
+       static struct cpuinfo_cache cc;
+       cc.size = 64 * 512 * 12;        /* line_size * sets * associativity */
+       cc.associativity = 12;
+       cc.sets = 512;          /* fix: second write previously clobbered .associativity */
+       cc.partitions = 64;     /* NOTE(review): cpuinfo normally uses 1 here -- confirm */
+       cc.line_size = 64;
+       cc.flags = CPUINFO_CACHE_INCLUSIVE;
+       cc.processor_start = 0;
+       cc.processor_count = 1;
+       return &cc;
+}
+
+const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_processor(uint32_t 
index);
+const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_core(uint32_t index);
+const struct cpuinfo_cluster* CPUINFO_ABI cpuinfo_get_cluster(uint32_t index);
+const struct cpuinfo_package* CPUINFO_ABI cpuinfo_get_package(uint32_t index);
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_cache(uint32_t index);
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_cache(uint32_t index)
+{
+       /* Stub: ignores 'index' and always returns the same fixed L1D cache
+        * description.  NOTE(review): non-static definition in a header
+        * risks multiple-definition link errors; also cpuinfo_get_l1i_cache
+        * above is only declared, not stubbed -- confirm that asymmetry. */
+       static struct cpuinfo_cache cc;
+       (void) index;   /* single fixed cache; silence -Wunused-parameter */
+       cc.size = 64 * 512 * 12;        /* line_size * sets * associativity */
+       cc.associativity = 12;
+       cc.sets = 512;          /* fix: second write previously clobbered .associativity */
+       cc.partitions = 64;     /* NOTE(review): cpuinfo normally uses 1 here -- confirm */
+       cc.line_size = 64;
+       cc.flags = CPUINFO_CACHE_INCLUSIVE;
+       cc.processor_start = 0;
+       cc.processor_count = 1;
+       return &cc;
+}
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_cache(uint32_t index)
+{
+       /* Stub: ignores 'index' and always returns the same fixed L2 cache
+        * description. */
+       static struct cpuinfo_cache cc;
+       (void) index;   /* single fixed cache; silence -Wunused-parameter */
+       cc.size = 64 * 512 * 12;        /* line_size * sets * associativity */
+       cc.associativity = 12;
+       cc.sets = 512;          /* fix: second write previously clobbered .associativity */
+       cc.partitions = 64;     /* NOTE(review): cpuinfo normally uses 1 here -- confirm */
+       cc.line_size = 64;
+       cc.flags = CPUINFO_CACHE_INCLUSIVE;
+       cc.processor_start = 0;
+       cc.processor_count = 1;
+       return &cc;
+}
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_cache(uint32_t index)
+{
+       /* Stub: ignores 'index' and always returns the same fixed L3 cache
+        * description. */
+       static struct cpuinfo_cache cc;
+       (void) index;   /* single fixed cache; silence -Wunused-parameter */
+       cc.size = 64 * 512 * 12;        /* line_size * sets * associativity */
+       cc.associativity = 12;
+       cc.sets = 512;          /* fix: second write previously clobbered .associativity */
+       cc.partitions = 64;     /* NOTE(review): cpuinfo normally uses 1 here -- confirm */
+       cc.line_size = 64;
+       cc.flags = CPUINFO_CACHE_INCLUSIVE;
+       cc.processor_start = 0;
+       cc.processor_count = 1;
+       return &cc;
+}
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_cache(uint32_t index)
+{
+       /* Stub: ignores 'index' and always returns the same fixed L4 cache
+        * description. */
+       static struct cpuinfo_cache cc;
+       (void) index;   /* single fixed cache; silence -Wunused-parameter */
+       cc.size = 64 * 512 * 12;        /* line_size * sets * associativity */
+       cc.associativity = 12;
+       cc.sets = 512;          /* fix: second write previously clobbered .associativity */
+       cc.partitions = 64;     /* NOTE(review): cpuinfo normally uses 1 here -- confirm */
+       cc.line_size = 64;
+       cc.flags = CPUINFO_CACHE_INCLUSIVE;
+       cc.processor_start = 0;
+       cc.processor_count = 1;
+       return &cc;
+}
+uint32_t CPUINFO_ABI cpuinfo_get_processors_count(void);
+uint32_t CPUINFO_ABI cpuinfo_get_cores_count(void);
+uint32_t CPUINFO_ABI cpuinfo_get_clusters_count(void);
+uint32_t CPUINFO_ABI cpuinfo_get_packages_count(void);
+uint32_t CPUINFO_ABI cpuinfo_get_l1i_caches_count(void);
+uint32_t CPUINFO_ABI cpuinfo_get_l1d_caches_count(void);
+uint32_t CPUINFO_ABI cpuinfo_get_l2_caches_count(void);
+uint32_t CPUINFO_ABI cpuinfo_get_l3_caches_count(void);
+uint32_t CPUINFO_ABI cpuinfo_get_l4_caches_count(void);
+
+const struct cpuinfo_processor* CPUINFO_ABI 
cpuinfo_get_current_processor(void);
+const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_current_core(void);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* CPUINFO_H */
+
diff --git a/include/linux/futex.h b/include/linux/futex.h
new file mode 100644
index 0000000..6227f9d
--- /dev/null
+++ b/include/linux/futex.h
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _LINUX_FUTEX_H
+#define _LINUX_FUTEX_H
+
+//#include <linux/compiler.h>
+//#include <linux/types.h>
+
+/* Second argument to futex syscall */
+
+
+#define FUTEX_WAIT             0
+#define FUTEX_WAKE             1
+#define FUTEX_FD               2
+#define FUTEX_REQUEUE          3
+#define FUTEX_CMP_REQUEUE      4
+#define FUTEX_WAKE_OP          5
+#define FUTEX_LOCK_PI          6
+#define FUTEX_UNLOCK_PI                7
+#define FUTEX_TRYLOCK_PI       8
+#define FUTEX_WAIT_BITSET      9
+#define FUTEX_WAKE_BITSET      10
+#define FUTEX_WAIT_REQUEUE_PI  11
+#define FUTEX_CMP_REQUEUE_PI   12
+
+#define FUTEX_PRIVATE_FLAG     128
+#define FUTEX_CLOCK_REALTIME   256
+#define FUTEX_CMD_MASK         ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
+
+#define FUTEX_WAIT_PRIVATE     (FUTEX_WAIT | FUTEX_PRIVATE_FLAG)
+#define FUTEX_WAKE_PRIVATE     (FUTEX_WAKE | FUTEX_PRIVATE_FLAG)
+#define FUTEX_REQUEUE_PRIVATE  (FUTEX_REQUEUE | FUTEX_PRIVATE_FLAG)
+#define FUTEX_CMP_REQUEUE_PRIVATE (FUTEX_CMP_REQUEUE | FUTEX_PRIVATE_FLAG)
+#define FUTEX_WAKE_OP_PRIVATE  (FUTEX_WAKE_OP | FUTEX_PRIVATE_FLAG)
+#define FUTEX_LOCK_PI_PRIVATE  (FUTEX_LOCK_PI | FUTEX_PRIVATE_FLAG)
+#define FUTEX_UNLOCK_PI_PRIVATE        (FUTEX_UNLOCK_PI | FUTEX_PRIVATE_FLAG)
+#define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG)
+#define FUTEX_WAIT_BITSET_PRIVATE      (FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG)
+#define FUTEX_WAKE_BITSET_PRIVATE      (FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG)
+#define FUTEX_WAIT_REQUEUE_PI_PRIVATE  (FUTEX_WAIT_REQUEUE_PI | \
+                                        FUTEX_PRIVATE_FLAG)
+#define FUTEX_CMP_REQUEUE_PI_PRIVATE   (FUTEX_CMP_REQUEUE_PI | \
+                                        FUTEX_PRIVATE_FLAG)
+
+/*
+ * Support for robust futexes: the kernel cleans up held futexes at
+ * thread exit time.
+ */
+
+/*
+ * Per-lock list entry - embedded in user-space locks, somewhere close
+ * to the futex field. (Note: user-space uses a double-linked list to
+ * achieve O(1) list add and remove, but the kernel only needs to know
+ * about the forward link)
+ *
+ * NOTE: this structure is part of the syscall ABI, and must not be
+ * changed.
+ */
+struct robust_list {
+       struct robust_list __user *next;        /* forward link only; kernel never walks backwards */
+};
+
+/*
+ * Per-thread list head:
+ *
+ * NOTE: this structure is part of the syscall ABI, and must only be
+ * changed if the change is first communicated with the glibc folks.
+ * (When an incompatible change is done, we'll increase the structure
+ *  size, which glibc will detect)
+ */
+struct robust_list_head {
+       /*
+        * The head of the list. Points back to itself if empty:
+        */
+       struct robust_list list;
+
+       /*
+        * This relative offset is set by user-space, it gives the kernel
+        * the relative position of the futex field to examine. This way
+        * we keep userspace flexible, to freely shape its data-structure,
+        * without hardcoding any particular offset into the kernel:
+        *
+        * NOTE(review): may be negative -- the futex word can precede the
+        * list entry in the user structure; confirm against upstream.
+        */
+       long futex_offset;
+
+       /*
+        * The death of the thread may race with userspace setting
+        * up a lock's links. So to handle this race, userspace first
+        * sets this field to the address of the to-be-taken lock,
+        * then does the lock acquire, and then adds itself to the
+        * list, and then clears this field. Hence the kernel will
+        * always have full knowledge of all locks that the thread
+        * _might_ have taken. We check the owner TID in any case,
+        * so only truly owned locks will be handled.
+        */
+       struct robust_list __user *list_op_pending;
+};
+
+/*
+ * Are there any waiters for this robust futex:
+ */
+#define FUTEX_WAITERS          0x80000000
+
+/*
+ * The kernel signals via this bit that a thread holding a futex
+ * has exited without unlocking the futex. The kernel also does
+ * a FUTEX_WAKE on such futexes, after setting the bit, to wake
+ * up any possible waiters:
+ */
+#define FUTEX_OWNER_DIED       0x40000000
+
+/*
+ * The rest of the robust-futex field is for the TID:
+ */
+#define FUTEX_TID_MASK         0x3fffffff
+
+/*
+ * This limit protects against a deliberately circular list.
+ * (Not worth introducing an rlimit for it)
+ */
+#define ROBUST_LIST_LIMIT      2048
+
+/*
+ * bitset with all bits set for the FUTEX_xxx_BITSET OPs to request a
+ * match of any bit.
+ */
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
+
+#ifdef __KERNEL__
+struct inode;
+struct mm_struct;
+struct task_struct;
+union ktime;
+
+long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout,
+             u32 __user *uaddr2, u32 val2, u32 val3);
+
+extern int
+handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
+
+/*
+ * Futexes are matched on equal values of this key.
+ * The key type depends on whether it's a shared or private mapping.
+ * Don't rearrange members without looking at hash_futex().
+ *
+ * offset is aligned to a multiple of sizeof(u32) (== 4) by definition.
+ * We use the two low order bits of offset to tell what is the kind of key :
+ *  00 : Private process futex (PTHREAD_PROCESS_PRIVATE)
+ *       (no reference on an inode or mm)
+ *  01 : Shared futex (PTHREAD_PROCESS_SHARED)
+ *     mapped on a file (reference on the underlying inode)
+ *  10 : Shared futex (PTHREAD_PROCESS_SHARED)
+ *       (but private mapping on an mm, and reference taken on it)
+*/
+
+#define FUT_OFF_INODE    1 /* We set bit 0 if key has a reference on inode */
+#define FUT_OFF_MMSHARED 2 /* We set bit 1 if key has a reference on mm */
+
+/* Kind of key is encoded in the two low bits of 'offset' (see comment
+ * above: 00 = private, 01 = inode-backed shared, 10 = mm-backed shared). */
+union futex_key {
+       struct {
+               unsigned long pgoff;    /* page offset within the backing file */
+               struct inode *inode;    /* referenced inode for file-backed shared futexes */
+               int offset;
+       } shared;
+       struct {
+               unsigned long address;  /* user virtual address of the futex word */
+               struct mm_struct *mm;   /* referenced mm for private-mapping shared futexes */
+               int offset;
+       } private;
+       struct {
+               /* type-punned view used for hashing/comparison (hash_futex) */
+               unsigned long word;
+               void *ptr;
+               int offset;
+       } both;
+};
+
+#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
+
+#ifdef CONFIG_FUTEX
+extern void exit_robust_list(struct task_struct *curr);
+extern void exit_pi_state_list(struct task_struct *curr);
+extern int futex_cmpxchg_enabled;
+#else
+static inline void exit_robust_list(struct task_struct *curr)
+{
+}
+static inline void exit_pi_state_list(struct task_struct *curr)
+{
+}
+#endif
+#endif /* __KERNEL__ */
+
+#define FUTEX_OP_SET           0       /* *(int *)UADDR2 = OPARG; */
+#define FUTEX_OP_ADD           1       /* *(int *)UADDR2 += OPARG; */
+#define FUTEX_OP_OR            2       /* *(int *)UADDR2 |= OPARG; */
+#define FUTEX_OP_ANDN          3       /* *(int *)UADDR2 &= ~OPARG; */
+#define FUTEX_OP_XOR           4       /* *(int *)UADDR2 ^= OPARG; */
+
+#define FUTEX_OP_OPARG_SHIFT   8       /* Use (1 << OPARG) instead of OPARG. */
+
+#define FUTEX_OP_CMP_EQ                0       /* if (oldval == CMPARG) wake */
+#define FUTEX_OP_CMP_NE                1       /* if (oldval != CMPARG) wake */
+#define FUTEX_OP_CMP_LT                2       /* if (oldval < CMPARG) wake */
+#define FUTEX_OP_CMP_LE                3       /* if (oldval <= CMPARG) wake */
+#define FUTEX_OP_CMP_GT                4       /* if (oldval > CMPARG) wake */
+#define FUTEX_OP_CMP_GE                5       /* if (oldval >= CMPARG) wake */
+
+/* FUTEX_WAKE_OP will perform atomically
+   int oldval = *(int *)UADDR2;
+   *(int *)UADDR2 = oldval OP OPARG;
+   if (oldval CMP CMPARG)
+     wake UADDR2;  */
+
+/* Pack a FUTEX_WAKE_OP argument word: op -> bits 31-28, cmp -> bits 27-24,
+ * oparg -> bits 23-12, cmparg -> bits 11-0.
+ * NOTE(review): macro arguments are not parenthesized (kept byte-identical
+ * with the upstream Linux uapi header) -- pass simple expressions only. */
+#define FUTEX_OP(op, oparg, cmp, cmparg) \
+  (((op & 0xf) << 28) | ((cmp & 0xf) << 24)            \
+   | ((oparg & 0xfff) << 12) | (cmparg & 0xfff))
+
+#endif
+
-- 
2.20.1


_______________________________________________
Minios-devel mailing list
Minios-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/minios-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.