[IR][AsmParser] Revamp how floating-point literals work in LLVM IR. #121838
base: main
Conversation
@llvm/pr-subscribers-llvm-adt @llvm/pr-subscribers-backend-hexagon

Author: Joshua Cranmer (jcranmer-intel)

Changes

This adds support for the following kinds of formats:

Additionally, the floating-point hexadecimal format that records the bit pattern exactly no longer requires the 0xL, 0xK, or similar prefix code for the floating-point type. This format is removed from the documentation, but is still supported as a legacy format in the parser.

Patch is 1.28 MiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/121838.diff

532 Files Affected:
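As a minimal sketch of what the change looks like in IR (the literal values are taken from the test updates below; the function names here are purely illustrative):

; Legacy prefixed bit-pattern literals, still accepted by the parser:
;   half       0xH3C00
;   x86_fp80   0xK3FFF8000000000000000
;   fp128      0xL00000000000000003FFF000000000000
;   ppc_fp128  0xM3FF00000000000000000000000000000
; New uniform syntax: a single f0x prefix followed by the value's full bit pattern:
define half @one_half() {
  ret half f0x3C00                                ; 1.0 as a half bit pattern
}
define fp128 @one_fp128() {
  ret fp128 f0x3FFF0000000000000000000000000000   ; 1.0 as an fp128 bit pattern
}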
diff --git a/clang/test/C/C11/n1396.c b/clang/test/C/C11/n1396.c
index 6f76cfe9594961..264c69c733cb68 100644
--- a/clang/test/C/C11/n1396.c
+++ b/clang/test/C/C11/n1396.c
@@ -31,7 +31,7 @@
// CHECK-X64-NEXT: store float [[X]], ptr [[X_ADDR]], align 4
// CHECK-X64-NEXT: [[TMP0:%.*]] = load float, ptr [[X_ADDR]], align 4
// CHECK-X64-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to x86_fp80
-// CHECK-X64-NEXT: [[MUL:%.*]] = fmul x86_fp80 [[CONV]], 0xK3FFF8000000000000000
+// CHECK-X64-NEXT: [[MUL:%.*]] = fmul x86_fp80 [[CONV]], f0x3FFF8000000000000000
// CHECK-X64-NEXT: [[CONV1:%.*]] = fptrunc x86_fp80 [[MUL]] to float
// CHECK-X64-NEXT: ret float [[CONV1]]
//
@@ -42,7 +42,7 @@
// CHECK-AARCH64-NEXT: store float [[X]], ptr [[X_ADDR]], align 4
// CHECK-AARCH64-NEXT: [[TMP0:%.*]] = load float, ptr [[X_ADDR]], align 4
// CHECK-AARCH64-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to fp128
-// CHECK-AARCH64-NEXT: [[MUL:%.*]] = fmul fp128 [[CONV]], 0xL00000000000000003FFF000000000000
+// CHECK-AARCH64-NEXT: [[MUL:%.*]] = fmul fp128 [[CONV]], f0x3FFF0000000000000000000000000000
// CHECK-AARCH64-NEXT: [[CONV1:%.*]] = fptrunc fp128 [[MUL]] to float
// CHECK-AARCH64-NEXT: ret float [[CONV1]]
//
@@ -64,7 +64,7 @@
// CHECK-PPC32-NEXT: store float [[X]], ptr [[X_ADDR]], align 4
// CHECK-PPC32-NEXT: [[TMP0:%.*]] = load float, ptr [[X_ADDR]], align 4
// CHECK-PPC32-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to ppc_fp128
-// CHECK-PPC32-NEXT: [[MUL:%.*]] = fmul ppc_fp128 [[CONV]], 0xM3FF00000000000000000000000000000
+// CHECK-PPC32-NEXT: [[MUL:%.*]] = fmul ppc_fp128 [[CONV]], f0x00000000000000003FF0000000000000
// CHECK-PPC32-NEXT: [[CONV1:%.*]] = fptrunc ppc_fp128 [[MUL]] to float
// CHECK-PPC32-NEXT: ret float [[CONV1]]
//
@@ -75,7 +75,7 @@
// CHECK-PPC64-NEXT: store float [[X]], ptr [[X_ADDR]], align 4
// CHECK-PPC64-NEXT: [[TMP0:%.*]] = load float, ptr [[X_ADDR]], align 4
// CHECK-PPC64-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to ppc_fp128
-// CHECK-PPC64-NEXT: [[MUL:%.*]] = fmul ppc_fp128 [[CONV]], 0xM3FF00000000000000000000000000000
+// CHECK-PPC64-NEXT: [[MUL:%.*]] = fmul ppc_fp128 [[CONV]], f0x00000000000000003FF0000000000000
// CHECK-PPC64-NEXT: [[CONV1:%.*]] = fptrunc ppc_fp128 [[MUL]] to float
// CHECK-PPC64-NEXT: ret float [[CONV1]]
//
@@ -86,7 +86,7 @@
// CHECK-SPARCV9-NEXT: store float [[X]], ptr [[X_ADDR]], align 4
// CHECK-SPARCV9-NEXT: [[TMP0:%.*]] = load float, ptr [[X_ADDR]], align 4
// CHECK-SPARCV9-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to fp128
-// CHECK-SPARCV9-NEXT: [[MUL:%.*]] = fmul fp128 [[CONV]], 0xL00000000000000003FFF000000000000
+// CHECK-SPARCV9-NEXT: [[MUL:%.*]] = fmul fp128 [[CONV]], f0x3FFF0000000000000000000000000000
// CHECK-SPARCV9-NEXT: [[CONV1:%.*]] = fptrunc fp128 [[MUL]] to float
// CHECK-SPARCV9-NEXT: ret float [[CONV1]]
//
@@ -102,7 +102,7 @@ float extended_float_func(float x) {
// CHECK-X64-NEXT: store float [[X]], ptr [[X_ADDR]], align 4
// CHECK-X64-NEXT: [[TMP0:%.*]] = load float, ptr [[X_ADDR]], align 4
// CHECK-X64-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to x86_fp80
-// CHECK-X64-NEXT: [[MUL:%.*]] = fmul x86_fp80 [[CONV]], 0xK3FFF8000000000000000
+// CHECK-X64-NEXT: [[MUL:%.*]] = fmul x86_fp80 [[CONV]], f0x3FFF8000000000000000
// CHECK-X64-NEXT: [[CONV1:%.*]] = fptrunc x86_fp80 [[MUL]] to float
// CHECK-X64-NEXT: ret float [[CONV1]]
//
@@ -113,7 +113,7 @@ float extended_float_func(float x) {
// CHECK-AARCH64-NEXT: store float [[X]], ptr [[X_ADDR]], align 4
// CHECK-AARCH64-NEXT: [[TMP0:%.*]] = load float, ptr [[X_ADDR]], align 4
// CHECK-AARCH64-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to fp128
-// CHECK-AARCH64-NEXT: [[MUL:%.*]] = fmul fp128 [[CONV]], 0xL00000000000000003FFF000000000000
+// CHECK-AARCH64-NEXT: [[MUL:%.*]] = fmul fp128 [[CONV]], f0x3FFF0000000000000000000000000000
// CHECK-AARCH64-NEXT: [[CONV1:%.*]] = fptrunc fp128 [[MUL]] to float
// CHECK-AARCH64-NEXT: ret float [[CONV1]]
//
@@ -135,7 +135,7 @@ float extended_float_func(float x) {
// CHECK-PPC32-NEXT: store float [[X]], ptr [[X_ADDR]], align 4
// CHECK-PPC32-NEXT: [[TMP0:%.*]] = load float, ptr [[X_ADDR]], align 4
// CHECK-PPC32-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to ppc_fp128
-// CHECK-PPC32-NEXT: [[MUL:%.*]] = fmul ppc_fp128 [[CONV]], 0xM3FF00000000000000000000000000000
+// CHECK-PPC32-NEXT: [[MUL:%.*]] = fmul ppc_fp128 [[CONV]], f0x00000000000000003FF0000000000000
// CHECK-PPC32-NEXT: [[CONV1:%.*]] = fptrunc ppc_fp128 [[MUL]] to float
// CHECK-PPC32-NEXT: ret float [[CONV1]]
//
@@ -146,7 +146,7 @@ float extended_float_func(float x) {
// CHECK-PPC64-NEXT: store float [[X]], ptr [[X_ADDR]], align 4
// CHECK-PPC64-NEXT: [[TMP0:%.*]] = load float, ptr [[X_ADDR]], align 4
// CHECK-PPC64-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to ppc_fp128
-// CHECK-PPC64-NEXT: [[MUL:%.*]] = fmul ppc_fp128 [[CONV]], 0xM3FF00000000000000000000000000000
+// CHECK-PPC64-NEXT: [[MUL:%.*]] = fmul ppc_fp128 [[CONV]], f0x00000000000000003FF0000000000000
// CHECK-PPC64-NEXT: [[CONV1:%.*]] = fptrunc ppc_fp128 [[MUL]] to float
// CHECK-PPC64-NEXT: ret float [[CONV1]]
//
@@ -157,7 +157,7 @@ float extended_float_func(float x) {
// CHECK-SPARCV9-NEXT: store float [[X]], ptr [[X_ADDR]], align 4
// CHECK-SPARCV9-NEXT: [[TMP0:%.*]] = load float, ptr [[X_ADDR]], align 4
// CHECK-SPARCV9-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to fp128
-// CHECK-SPARCV9-NEXT: [[MUL:%.*]] = fmul fp128 [[CONV]], 0xL00000000000000003FFF000000000000
+// CHECK-SPARCV9-NEXT: [[MUL:%.*]] = fmul fp128 [[CONV]], f0x3FFF0000000000000000000000000000
// CHECK-SPARCV9-NEXT: [[CONV1:%.*]] = fptrunc fp128 [[MUL]] to float
// CHECK-SPARCV9-NEXT: ret float [[CONV1]]
//
@@ -173,7 +173,7 @@ float extended_float_func_cast(float x) {
// CHECK-X64-NEXT: store float [[X]], ptr [[X_ADDR]], align 4
// CHECK-X64-NEXT: [[TMP0:%.*]] = load float, ptr [[X_ADDR]], align 4
// CHECK-X64-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to x86_fp80
-// CHECK-X64-NEXT: [[MUL:%.*]] = fmul x86_fp80 [[CONV]], 0xK3FFF8000000000000000
+// CHECK-X64-NEXT: [[MUL:%.*]] = fmul x86_fp80 [[CONV]], f0x3FFF8000000000000000
// CHECK-X64-NEXT: [[CONV1:%.*]] = fptrunc x86_fp80 [[MUL]] to float
// CHECK-X64-NEXT: ret float [[CONV1]]
//
@@ -184,7 +184,7 @@ float extended_float_func_cast(float x) {
// CHECK-AARCH64-NEXT: store float [[X]], ptr [[X_ADDR]], align 4
// CHECK-AARCH64-NEXT: [[TMP0:%.*]] = load float, ptr [[X_ADDR]], align 4
// CHECK-AARCH64-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to fp128
-// CHECK-AARCH64-NEXT: [[MUL:%.*]] = fmul fp128 [[CONV]], 0xL00000000000000003FFF000000000000
+// CHECK-AARCH64-NEXT: [[MUL:%.*]] = fmul fp128 [[CONV]], f0x3FFF0000000000000000000000000000
// CHECK-AARCH64-NEXT: [[CONV1:%.*]] = fptrunc fp128 [[MUL]] to float
// CHECK-AARCH64-NEXT: ret float [[CONV1]]
//
@@ -206,7 +206,7 @@ float extended_float_func_cast(float x) {
// CHECK-PPC32-NEXT: store float [[X]], ptr [[X_ADDR]], align 4
// CHECK-PPC32-NEXT: [[TMP0:%.*]] = load float, ptr [[X_ADDR]], align 4
// CHECK-PPC32-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to ppc_fp128
-// CHECK-PPC32-NEXT: [[MUL:%.*]] = fmul ppc_fp128 [[CONV]], 0xM3FF00000000000000000000000000000
+// CHECK-PPC32-NEXT: [[MUL:%.*]] = fmul ppc_fp128 [[CONV]], f0x00000000000000003FF0000000000000
// CHECK-PPC32-NEXT: [[CONV1:%.*]] = fptrunc ppc_fp128 [[MUL]] to float
// CHECK-PPC32-NEXT: ret float [[CONV1]]
//
@@ -217,7 +217,7 @@ float extended_float_func_cast(float x) {
// CHECK-PPC64-NEXT: store float [[X]], ptr [[X_ADDR]], align 4
// CHECK-PPC64-NEXT: [[TMP0:%.*]] = load float, ptr [[X_ADDR]], align 4
// CHECK-PPC64-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to ppc_fp128
-// CHECK-PPC64-NEXT: [[MUL:%.*]] = fmul ppc_fp128 [[CONV]], 0xM3FF00000000000000000000000000000
+// CHECK-PPC64-NEXT: [[MUL:%.*]] = fmul ppc_fp128 [[CONV]], f0x00000000000000003FF0000000000000
// CHECK-PPC64-NEXT: [[CONV1:%.*]] = fptrunc ppc_fp128 [[MUL]] to float
// CHECK-PPC64-NEXT: ret float [[CONV1]]
//
@@ -228,7 +228,7 @@ float extended_float_func_cast(float x) {
// CHECK-SPARCV9-NEXT: store float [[X]], ptr [[X_ADDR]], align 4
// CHECK-SPARCV9-NEXT: [[TMP0:%.*]] = load float, ptr [[X_ADDR]], align 4
// CHECK-SPARCV9-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to fp128
-// CHECK-SPARCV9-NEXT: [[MUL:%.*]] = fmul fp128 [[CONV]], 0xL00000000000000003FFF000000000000
+// CHECK-SPARCV9-NEXT: [[MUL:%.*]] = fmul fp128 [[CONV]], f0x3FFF0000000000000000000000000000
// CHECK-SPARCV9-NEXT: [[CONV1:%.*]] = fptrunc fp128 [[MUL]] to float
// CHECK-SPARCV9-NEXT: ret float [[CONV1]]
//
@@ -244,7 +244,7 @@ float extended_double_func(float x) {
// CHECK-X64-NEXT: store float [[X]], ptr [[X_ADDR]], align 4
// CHECK-X64-NEXT: [[TMP0:%.*]] = load float, ptr [[X_ADDR]], align 4
// CHECK-X64-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to x86_fp80
-// CHECK-X64-NEXT: [[MUL:%.*]] = fmul x86_fp80 [[CONV]], 0xK3FFF8000000000000000
+// CHECK-X64-NEXT: [[MUL:%.*]] = fmul x86_fp80 [[CONV]], f0x3FFF8000000000000000
// CHECK-X64-NEXT: [[CONV1:%.*]] = fptrunc x86_fp80 [[MUL]] to float
// CHECK-X64-NEXT: ret float [[CONV1]]
//
@@ -255,7 +255,7 @@ float extended_double_func(float x) {
// CHECK-AARCH64-NEXT: store float [[X]], ptr [[X_ADDR]], align 4
// CHECK-AARCH64-NEXT: [[TMP0:%.*]] = load float, ptr [[X_ADDR]], align 4
// CHECK-AARCH64-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to fp128
-// CHECK-AARCH64-NEXT: [[MUL:%.*]] = fmul fp128 [[CONV]], 0xL00000000000000003FFF000000000000
+// CHECK-AARCH64-NEXT: [[MUL:%.*]] = fmul fp128 [[CONV]], f0x3FFF0000000000000000000000000000
// CHECK-AARCH64-NEXT: [[CONV1:%.*]] = fptrunc fp128 [[MUL]] to float
// CHECK-AARCH64-NEXT: ret float [[CONV1]]
//
@@ -277,7 +277,7 @@ float extended_double_func(float x) {
// CHECK-PPC32-NEXT: store float [[X]], ptr [[X_ADDR]], align 4
// CHECK-PPC32-NEXT: [[TMP0:%.*]] = load float, ptr [[X_ADDR]], align 4
// CHECK-PPC32-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to ppc_fp128
-// CHECK-PPC32-NEXT: [[MUL:%.*]] = fmul ppc_fp128 [[CONV]], 0xM3FF00000000000000000000000000000
+// CHECK-PPC32-NEXT: [[MUL:%.*]] = fmul ppc_fp128 [[CONV]], f0x00000000000000003FF0000000000000
// CHECK-PPC32-NEXT: [[CONV1:%.*]] = fptrunc ppc_fp128 [[MUL]] to float
// CHECK-PPC32-NEXT: ret float [[CONV1]]
//
@@ -288,7 +288,7 @@ float extended_double_func(float x) {
// CHECK-PPC64-NEXT: store float [[X]], ptr [[X_ADDR]], align 4
// CHECK-PPC64-NEXT: [[TMP0:%.*]] = load float, ptr [[X_ADDR]], align 4
// CHECK-PPC64-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to ppc_fp128
-// CHECK-PPC64-NEXT: [[MUL:%.*]] = fmul ppc_fp128 [[CONV]], 0xM3FF00000000000000000000000000000
+// CHECK-PPC64-NEXT: [[MUL:%.*]] = fmul ppc_fp128 [[CONV]], f0x00000000000000003FF0000000000000
// CHECK-PPC64-NEXT: [[CONV1:%.*]] = fptrunc ppc_fp128 [[MUL]] to float
// CHECK-PPC64-NEXT: ret float [[CONV1]]
//
@@ -299,7 +299,7 @@ float extended_double_func(float x) {
// CHECK-SPARCV9-NEXT: store float [[X]], ptr [[X_ADDR]], align 4
// CHECK-SPARCV9-NEXT: [[TMP0:%.*]] = load float, ptr [[X_ADDR]], align 4
// CHECK-SPARCV9-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to fp128
-// CHECK-SPARCV9-NEXT: [[MUL:%.*]] = fmul fp128 [[CONV]], 0xL00000000000000003FFF000000000000
+// CHECK-SPARCV9-NEXT: [[MUL:%.*]] = fmul fp128 [[CONV]], f0x3FFF0000000000000000000000000000
// CHECK-SPARCV9-NEXT: [[CONV1:%.*]] = fptrunc fp128 [[MUL]] to float
// CHECK-SPARCV9-NEXT: ret float [[CONV1]]
//
diff --git a/clang/test/CodeGen/AArch64/v8.2a-fp16-intrinsics-constrained.c b/clang/test/CodeGen/AArch64/v8.2a-fp16-intrinsics-constrained.c
index 9109626cea9ca2..2c87ce32b8811b 100644
--- a/clang/test/CodeGen/AArch64/v8.2a-fp16-intrinsics-constrained.c
+++ b/clang/test/CodeGen/AArch64/v8.2a-fp16-intrinsics-constrained.c
@@ -12,8 +12,8 @@
#include <arm_fp16.h>
// COMMON-LABEL: test_vceqzh_f16
-// UNCONSTRAINED: [[TMP1:%.*]] = fcmp oeq half %a, 0xH0000
-// CONSTRAINED: [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half 0xH0000, metadata !"oeq", metadata !"fpexcept.strict")
+// UNCONSTRAINED: [[TMP1:%.*]] = fcmp oeq half %a, f0x0000
+// CONSTRAINED: [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half f0x0000, metadata !"oeq", metadata !"fpexcept.strict")
// COMMONIR: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
// COMMONIR: ret i16 [[TMP2]]
uint16_t test_vceqzh_f16(float16_t a) {
@@ -21,8 +21,8 @@ uint16_t test_vceqzh_f16(float16_t a) {
}
// COMMON-LABEL: test_vcgezh_f16
-// UNCONSTRAINED: [[TMP1:%.*]] = fcmp oge half %a, 0xH0000
-// CONSTRAINED: [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half 0xH0000, metadata !"oge", metadata !"fpexcept.strict")
+// UNCONSTRAINED: [[TMP1:%.*]] = fcmp oge half %a, f0x0000
+// CONSTRAINED: [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half f0x0000, metadata !"oge", metadata !"fpexcept.strict")
// COMMONIR: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
// COMMONIR: ret i16 [[TMP2]]
uint16_t test_vcgezh_f16(float16_t a) {
@@ -30,8 +30,8 @@ uint16_t test_vcgezh_f16(float16_t a) {
}
// COMMON-LABEL: test_vcgtzh_f16
-// UNCONSTRAINED: [[TMP1:%.*]] = fcmp ogt half %a, 0xH0000
-// CONSTRAINED: [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half 0xH0000, metadata !"ogt", metadata !"fpexcept.strict")
+// UNCONSTRAINED: [[TMP1:%.*]] = fcmp ogt half %a, f0x0000
+// CONSTRAINED: [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half f0x0000, metadata !"ogt", metadata !"fpexcept.strict")
// COMMONIR: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
// COMMONIR: ret i16 [[TMP2]]
uint16_t test_vcgtzh_f16(float16_t a) {
@@ -39,8 +39,8 @@ uint16_t test_vcgtzh_f16(float16_t a) {
}
// COMMON-LABEL: test_vclezh_f16
-// UNCONSTRAINED: [[TMP1:%.*]] = fcmp ole half %a, 0xH0000
-// CONSTRAINED: [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half 0xH0000, metadata !"ole", metadata !"fpexcept.strict")
+// UNCONSTRAINED: [[TMP1:%.*]] = fcmp ole half %a, f0x0000
+// CONSTRAINED: [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half f0x0000, metadata !"ole", metadata !"fpexcept.strict")
// COMMONIR: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
// COMMONIR: ret i16 [[TMP2]]
uint16_t test_vclezh_f16(float16_t a) {
@@ -48,8 +48,8 @@ uint16_t test_vclezh_f16(float16_t a) {
}
// COMMON-LABEL: test_vcltzh_f16
-// UNCONSTRAINED: [[TMP1:%.*]] = fcmp olt half %a, 0xH0000
-// CONSTRAINED: [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half 0xH0000, metadata !"olt", metadata !"fpexcept.strict")
+// UNCONSTRAINED: [[TMP1:%.*]] = fcmp olt half %a, f0x0000
+// CONSTRAINED: [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half f0x0000, metadata !"olt", metadata !"fpexcept.strict")
// COMMONIR: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
// COMMONIR: ret i16 [[TMP2]]
uint16_t test_vcltzh_f16(float16_t a) {
diff --git a/clang/test/CodeGen/AArch64/v8.2a-fp16-intrinsics.c b/clang/test/CodeGen/AArch64/v8.2a-fp16-intrinsics.c
index 90ee74e459ebd4..27d60de792b074 100644
--- a/clang/test/CodeGen/AArch64/v8.2a-fp16-intrinsics.c
+++ b/clang/test/CodeGen/AArch64/v8.2a-fp16-intrinsics.c
@@ -15,7 +15,7 @@ float16_t test_vabsh_f16(float16_t a) {
}
// CHECK-LABEL: test_vceqzh_f16
-// CHECK: [[TMP1:%.*]] = fcmp oeq half %a, 0xH0000
+// CHECK: [[TMP1:%.*]] = fcmp oeq half %a, f0x0000
// CHECK: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
// CHECK: ret i16 [[TMP2]]
uint16_t test_vceqzh_f16(float16_t a) {
@@ -23,7 +23,7 @@ uint16_t test_vceqzh_f16(float16_t a) {
}
// CHECK-LABEL: test_vcgezh_f16
-// CHECK: [[TMP1:%.*]] = fcmp oge half %a, 0xH0000
+// CHECK: [[TMP1:%.*]] = fcmp oge half %a, f0x0000
// CHECK: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
// CHECK: ret i16 [[TMP2]]
uint16_t test_vcgezh_f16(float16_t a) {
@@ -31,7 +31,7 @@ uint16_t test_vcgezh_f16(float16_t a) {
}
// CHECK-LABEL: test_vcgtzh_f16
-// CHECK: [[TMP1:%.*]] = fcmp ogt half %a, 0xH0000
+// CHECK: [[TMP1:%.*]] = fcmp ogt half %a, f0x0000
// CHECK: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
// CHECK: ret i16 [[TMP2]]
uint16_t test_vcgtzh_f16(float16_t a) {
@@ -39,7 +39,7 @@ uint16_t test_vcgtzh_f16(float16_t a) {
}
// CHECK-LABEL: test_vclezh_f16
-// CHECK: [[TMP1:%.*]] = fcmp ole half %a, 0xH0000
+// CHECK: [[TMP1:%.*]] = fcmp ole half %a, f0x0000
// CHECK: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
// CHECK: ret i16 [[TMP2]]
uint16_t test_vclezh_f16(float16_t a) {
@@ -47,7 +47,7 @@ uint16_t test_vclezh_f16(float16_t a) {
}
// CHECK-LABEL: test_vcltzh_f16
-// CHECK: [[TMP1:%.*]] = fcmp olt half %a, 0xH0000
+// CHECK: [[TMP1:%.*]] = fcmp olt half %a, f0x0000
// CHECK: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
// CHECK: ret i16 [[TMP2]]
uint16_t test_vcltzh_f16(float16_t a) {
diff --git a/clang/test/CodeGen/AMDGPU/amdgpu-atomic-float.c b/clang/test/CodeGen/AMDGPU/amdgpu-atomic-float.c
index a8fb989b64de50..b6bbff0c742f89 100644
--- a/clang/test/CodeGen/AMDGPU/amdgpu-atomic-float.c
+++ b/clang/test/CodeGen/AMDGPU/amdgpu-atomic-float.c
@@ -191,7 +191,7 @@ double test_double_pre_inc()
// SAFE-NEXT: [[ENTRY:.*:]]
// SAFE-NEXT: [[RETVAL:%.*]] = alloca half, align 2, addrspace(5)
// SAFE-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// SAFE-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test__Float16_post_inc.n to ptr), half 0xH3C00 seq_cst, align 2
+// SAFE-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test__Float16_post_inc.n to ptr), half f0x3C00 seq_cst, align 2
// SAFE-NEXT: ret half [[TMP0]]
//
// UNSAFE-LABEL: define dso_local half @test__Float16_post_inc(
@@ -199,7 +199,7 @@ double test_double_pre_inc()
// UNSAFE-NEXT: [[ENTRY:.*:]]
// UNSAFE-NEXT: [[RETVAL:%.*]] = alloca half, align 2, addrspace(5)
// UNSAFE-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// UNSAFE-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test__Float16_post_inc.n to ptr), half 0xH3C00 seq_cst, align 2, !amdgpu.no.fine.grained.memory [[META3]]
+// UNSAFE-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test__Float16_post_inc.n to ptr), half f0x3C00 seq_cst, align 2, !amdgpu.no.fine.grained.memory [[META3]]
// UNSAFE-NEXT: ret half [[TMP0]]
//
_Float16 test__Float16_post_inc()
@@ -213,7 +213,7 @@ _Float16 test__Float16_post_inc()
// SAFE-NEXT: [[ENTRY:.*:]]
// SAFE-NEXT: [[RETVAL:%.*]] = alloca half, align 2, addrspace(5)
// SAFE-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// SAFE-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test__Float16_post_dc.n to ptr), half 0xH3C00 seq_cst, align 2
+// SAFE-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test__Float16_post_dc.n to ptr), half f0x3C00 seq_cst, align 2
// SAFE-NEXT: ret half [[TMP0]]
//
// UNSAFE-LABEL: define dso_local half @test__Float16_post_dc(
@@ -221,7 +221,7 @@ _Float16 test__Float16_post_inc()
// UNSAFE-NEXT: [[ENTRY:.*:]]
// UNSAFE-NEXT: [[RETVAL:%.*]] = alloca half, align 2, addrspace(5)
// UNSAFE-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// UNSAFE-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test__Float16_post_dc.n to ptr), half 0xH3C00 seq_cst, align 2, !amdgpu.no.fine.grained.memory [[META3]]
+// UNSAFE-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test__Float16_post_dc.n to ptr), half f0x3C00 seq_cst, align 2, !amdgpu.no.fine.grained.memory [[META3]]
// UNSAFE-NEXT: ret half [[TMP0]]
//
_Float16 test__Float16_post_dc()
@@ -235,8 +235,8 @@ _Float16 test__Float16_post_dc()
// SAFE-NEXT: [[ENTRY:.*:]]
// SAFE-NEXT: [[RETVAL:%.*]] = alloca half, align 2, addrspace(5)
// SAFE-NEXT: [[RETVAL_ASCAST...
[truncated]
// UNSAFE-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// UNSAFE-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test__Float16_post_dc.n to ptr), half 0xH3C00 seq_cst, align 2, !amdgpu.no.fine.grained.memory [[META3]]
+// UNSAFE-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test__Float16_post_dc.n to ptr), half f0x3C00 seq_cst, align 2, !amdgpu.no.fine.grained.memory [[META3]]
// UNSAFE-NEXT: ret half [[TMP0]]
//
_Float16 test__Float16_post_dc()
@@ -235,8 +235,8 @@ _Float16 test__Float16_post_dc()
// SAFE-NEXT: [[ENTRY:.*:]]
// SAFE-NEXT: [[RETVAL:%.*]] = alloca half, align 2, addrspace(5)
// SAFE-NEXT: [[RETVAL_ASCAST...
[truncated]
@llvm/pr-subscribers-backend-aarch64
✅ With the latest revision this PR passed the C/C++ code formatter.
llvm/lib/AsmParser/LLLexer.cpp
Outdated
TokStart[1] == '0' && TokStart[2] == 'x' &&
isxdigit(static_cast<unsigned char>(TokStart[3]))) {
int len = CurPtr-TokStart-3;
bool IsFloatConst = TokStart[0] == 'f';
int len = CurPtr - TokStart - 3;
I know it's from the old code but since we're changing it anyway, could you make it Len? Also, why int rather than unsigned or size_t?
llvm/lib/AsmParser/LLParser.cpp
Outdated
}
case lltok::FloatHexLiteral: {
assert(ExpectedTy && "Need type to parse float values");
auto &Semantics = ExpectedTy->getFltSemantics();
nit: const auto &
As you still support the legacy format, could you please restrict this PR to only the parser changes, and leave the printer changes (and the mass test update they require) to a followup?
Sure, I can do it. I made them two separate commits partly for that reason.
1a57536 to ae7b095
"how floating-point literals" doesn't read right to me - is there a word missing? |
My build on Linux PPC failed with:
llvm/docs/LangRef.rst
Outdated
| | required, as is one or more leading digits before |
| | the decimal point. |
+---------------+---------------------------------------------------+
| ``-0x1.fp13`` | Common hexadecimal literal. Signs are optional. |
Should we switch the default syntax to hex float? Not as part of this PR, it would be more disruptive
This was discussed on the RFC for this in discourse, but there wasn't entirely a consensus on doing so.
I did start working on a follow-up PR to extend the smart-output to more cases, but in going through and manually fixing the loads of broken tests that aren't automatically updateable, I have found that the current logic leaves a lot to be desired.
ae7b095 to e070228
While working on other changes, I noticed that the
Additionally, the floating-point hexadecimal format that records the bitpattern exactly no longer requires the 0xL or 0xK or similar code for the floating-point type. This format is removed from the documentation, but is still supported as a legacy format in the parser.
Personally I'd prefer that anything that's supported is documented, even if it is documented as deprecated.
using the default rounding mode (round to nearest, half to even). String
conversions that underflow to 0 or overflow to infinity are not permitted.
Allowing rounding seems nice. Any particular reason not to allow overflow/underflow? Just being conservative?
My main motivation was to keep the parsing code as strict as possible, so that if you saw a constant in the code, you could be certain you knew what it was. Despite the existing documentation, we already allow inexact conversions from decimal strings to double (we check for exactness on conversion of the resulting double to the actual type, though).

There's an argument to be made for allowing 0.1 as a constant, even if we didn't already allow it; I don't see a strong argument for allowing 1e99999 or 1e-99999 when we have easy syntax for infinity already.
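To make that rule concrete, here is a minimal standalone sketch of it, assuming the current APFloat API; the helper name parseStrictFPLiteral is made up for illustration, and this is not the actual LLParser code. It accepts literals that merely round (opInexact), but rejects ones that overflow to infinity or underflow all the way to zero, while still allowing denormal results.

```cpp
// Illustrative sketch only: convert with round-to-nearest-ties-to-even,
// tolerate ordinary rounding, reject overflow to infinity and underflow
// that flushed the value to zero.
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"
#include <optional>

using namespace llvm;

static std::optional<APFloat> parseStrictFPLiteral(StringRef Str,
                                                   const fltSemantics &Sem) {
  APFloat Val(Sem);
  Expected<APFloat::opStatus> Status =
      Val.convertFromString(Str, APFloat::rmNearestTiesToEven);
  if (!Status) {
    consumeError(Status.takeError()); // malformed literal text
    return std::nullopt;
  }
  // Rounding alone (opInexact) is fine: "0.1" is accepted for double.
  if (*Status & APFloat::opOverflow)
    return std::nullopt; // e.g. "1e99999" -> +inf is rejected
  if ((*Status & APFloat::opUnderflow) && Val.isZero())
    return std::nullopt; // "1e-99999" -> 0 is rejected; denormals are kept
  return Val;
}
```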
I always thought the parser behavior was ensuring the constant was exact with no rounding. If it wasn't doing that, I'd consider it to be a bug and we shouldn't be more lax
@@ -4608,31 +4610,40 @@ Simple Constants
The identifier '``none``' is recognized as an empty token constant
and must be of :ref:`token type <t_token>`.

The one non-intuitive notation for constants is the hexadecimal form of
floating-point constants. For example, the form
'``double 0x432ff973cafa8000``' is equivalent to (but harder to read
Are you removing this old double 0x432ff973cafa8000 syntax? So is this change not backwards compatible?
The original parsing code is still being kept, which is why this patch can go in without a few hundred test changes.
I removed the documentation partly because it's deprecated, partly because it's flat out wrong, and partly because describing the correct behavior is annoying (e.g., 0xM and 0xL don't work like the documentation suggests).
| | as hexadecimal (not including the quiet bit as |
| | part of the payload). The sign is required. |
+----------------+---------------------------------------------------+
| ``+snan(0x1)`` | sNaN value with a particular payload, specified |
Why do nan and snan take an explicit payload but qnan does not?
The original proposal was qnan for the preferred qNaN, and nan(...) for every other NaN value. I discovered last night that APFloat::convertFromString didn't allow nan(...) to produce an sNaN value, and after staring at the IEEE 754 and C23 specifications for a bit to look at what they wanted for string->NaN conversions, I concluded that it was better to explicitly call out an snan(...) string than to make nan(...) produce a qNaN value.

There's not much keeping qnan from having a payload parameter, except that APFloat::convertFromString doesn't support it. That's changeable, but I noticed the IEEE 754 specification doesn't ever use qnan for a qNaN string, so it doesn't entirely feel right to me to change APFloat::convertFromString to allow it.

FWIW, I also expect that virtually every NaN in practice ends up being +qnan or -qnan anyway.
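For reference, the three spellings discussed here map fairly directly onto the existing APFloat NaN factories. The sketch below is purely illustrative: the function name is made up, and the literal forms in the comments are the syntax proposed by this patch, not something APFloat parses itself.

```cpp
// Rough correspondence between the proposed literal spellings and the
// APFloat NaN factories (payload excludes the quiet bit).
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include <cassert>

using namespace llvm;

void nanSpellings() {
  const fltSemantics &Sem = APFloat::IEEEsingle();
  APInt Payload(32, 1);

  APFloat PreferredQNaN = APFloat::getNaN(Sem);                              // +qnan
  APFloat PayloadQNaN = APFloat::getQNaN(Sem, /*Negative=*/false, &Payload); // +nan(0x1)
  APFloat PayloadSNaN = APFloat::getSNaN(Sem, /*Negative=*/false, &Payload); // +snan(0x1)

  assert(!PreferredQNaN.isSignaling());
  assert(!PayloadQNaN.isSignaling());
  assert(PayloadSNaN.isSignaling());
}
```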
Should we spell out this assumes the 2008 snan quiet bit pattern?
Review ping.
const auto &Semantics = ExpectedTy->getFltSemantics();
const APInt &Bits = Lex.getAPSIntVal();
if (APFloat::getSizeInBits(Semantics) != Bits.getBitWidth())
return error(ID.Loc, "float hex literal has incorrect number of bits");
Add a test for this case? The one place the message appears seems to be an accidental change in a MIR test
// literals. Underflow is thrown when the result is denormal, so to allow
// denormals, only reject underflowing literals that resulted in a zero.
if (*Except & APFloat::opOverflow)
return error(ID.Loc, "floating point constant overflowed type");
I think the prevailing spelling hyphenates floating-point
EXPECT_TRUE(
cast<ConstantFP>(V)->isExactlyValue(APFloat::getNaN(Float, false, 1)));
EXPECT_TRUE(!cast<ConstantFP>(V)->getValue().isSignaling());
Test with the degenerate FP types? ppc / x86, maybe bfloat?
ASSERT_TRUE(V);
EXPECT_TRUE(V->getType()->isFP128Ty());
ASSERT_TRUE(isa<ConstantFP>(V));
EXPECT_TRUE(cast<ConstantFP>(V)->isExactlyValue(-0.0));
I thought we had an isNegZero helper now