diff --git a/src/coreclr/interpreter/compiler.cpp b/src/coreclr/interpreter/compiler.cpp
index fe416738fa506e..d4f5f3d723c8c3 100644
--- a/src/coreclr/interpreter/compiler.cpp
+++ b/src/coreclr/interpreter/compiler.cpp
@@ -2502,6 +2502,26 @@ void InterpCompiler::EmitOneArgBranch(InterpOpcode opcode, int32_t ilOffset, int
}
}
+// Determines whether the implicit I4 to I8 promotion of an operand should use zero-extension
+// (for unsigned compare-and-branch and unsigned overflow-checked arithmetic opcodes) or
+// sign-extension (for all other opcodes, including div.un and rem.un), based on the opcode.
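+// For example, INTOP_BLT_UN_I4 widens its int32 operand with INTOP_CONV_U8_U4 (zero-extension),
+// whereas the signed branch and plain arithmetic opcodes fall through to INTOP_CONV_I8_I4
+// (sign-extension).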
+InterpOpcode InterpOpForWideningArgForImplicitUpcast(InterpOpcode opcode)
+{
+ switch (opcode)
+ {
+ case INTOP_BNE_UN_I4:
+ case INTOP_BLE_UN_I4:
+ case INTOP_BLT_UN_I4:
+ case INTOP_BGE_UN_I4:
+ case INTOP_BGT_UN_I4:
+ case INTOP_ADD_OVF_UN_I4:
+ case INTOP_SUB_OVF_UN_I4:
+ case INTOP_MUL_OVF_UN_I4:
+ return INTOP_CONV_U8_U4;
+ default:
+ return INTOP_CONV_I8_I4;
+ }
+}
+
void InterpCompiler::EmitTwoArgBranch(InterpOpcode opcode, int32_t ilOffset, int insSize)
{
CHECK_STACK(2);
@@ -2512,12 +2532,12 @@ void InterpCompiler::EmitTwoArgBranch(InterpOpcode opcode, int32_t ilOffset, int
// emitting the conditional branch
if (argType1 == StackTypeI4 && argType2 == StackTypeI8)
{
- EmitConv(m_pStackPointer - 1, StackTypeI8, INTOP_CONV_I8_I4);
+ EmitConv(m_pStackPointer - 1, StackTypeI8, InterpOpForWideningArgForImplicitUpcast(opcode));
argType1 = StackTypeI8;
}
else if (argType1 == StackTypeI8 && argType2 == StackTypeI4)
{
- EmitConv(m_pStackPointer - 2, StackTypeI8, INTOP_CONV_I8_I4);
+ EmitConv(m_pStackPointer - 2, StackTypeI8, InterpOpForWideningArgForImplicitUpcast(opcode));
}
else if (argType1 == StackTypeR4 && argType2 == StackTypeR8)
{
@@ -2753,12 +2773,12 @@ void InterpCompiler::EmitBinaryArithmeticOp(int32_t opBase)
#if TARGET_64BIT
if (type1 == StackTypeI8 && type2 == StackTypeI4)
{
- EmitConv(m_pStackPointer - 1, StackTypeI8, INTOP_CONV_I8_I4);
+ EmitConv(m_pStackPointer - 1, StackTypeI8, InterpOpForWideningArgForImplicitUpcast((InterpOpcode)opBase));
type2 = StackTypeI8;
}
else if (type1 == StackTypeI4 && type2 == StackTypeI8)
{
- EmitConv(m_pStackPointer - 2, StackTypeI8, INTOP_CONV_I8_I4);
+ EmitConv(m_pStackPointer - 2, StackTypeI8, InterpOpForWideningArgForImplicitUpcast((InterpOpcode)opBase));
type1 = StackTypeI8;
}
#endif
diff --git a/src/tests/JIT/Methodical/Methodical_d2.csproj b/src/tests/JIT/Methodical/Methodical_d2.csproj
index 3053cf52ca45d9..a5bb37cde67e9d 100644
--- a/src/tests/JIT/Methodical/Methodical_d2.csproj
+++ b/src/tests/JIT/Methodical/Methodical_d2.csproj
@@ -11,6 +11,7 @@
+
@@ -45,6 +46,7 @@
+
diff --git a/src/tests/JIT/Methodical/Methodical_do.csproj b/src/tests/JIT/Methodical/Methodical_do.csproj
index 7447e30fe06523..79b2779f80b8ed 100644
--- a/src/tests/JIT/Methodical/Methodical_do.csproj
+++ b/src/tests/JIT/Methodical/Methodical_do.csproj
@@ -9,6 +9,7 @@
+
@@ -183,6 +184,7 @@
+
diff --git a/src/tests/JIT/Methodical/Methodical_r2.csproj b/src/tests/JIT/Methodical/Methodical_r2.csproj
index f07b36dfa1e971..b3b4e66d8cc7c0 100644
--- a/src/tests/JIT/Methodical/Methodical_r2.csproj
+++ b/src/tests/JIT/Methodical/Methodical_r2.csproj
@@ -11,6 +11,7 @@
+
@@ -45,6 +46,7 @@
+
diff --git a/src/tests/JIT/Methodical/Methodical_ro.csproj b/src/tests/JIT/Methodical/Methodical_ro.csproj
index edabd5c971a472..2e88949d8f061f 100644
--- a/src/tests/JIT/Methodical/Methodical_ro.csproj
+++ b/src/tests/JIT/Methodical/Methodical_ro.csproj
@@ -9,6 +9,7 @@
+
@@ -183,6 +184,7 @@
+
diff --git a/src/tests/JIT/Methodical/int64/unsigned/implicit_promotion.cs b/src/tests/JIT/Methodical/int64/unsigned/implicit_promotion.cs
new file mode 100644
index 00000000000000..b4701fabbb2b40
--- /dev/null
+++ b/src/tests/JIT/Methodical/int64/unsigned/implicit_promotion.cs
@@ -0,0 +1,236 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using Xunit;
+using JitTest_implicit_promotion;
+using implicit_promotion;
+
+namespace JitTest_implicit_promotion
+{
+ public class Test
+ {
+ // These tests verify that the implicit upcast from I4 to I, which happens on 64-bit platforms,
+ // is performed in a consistent manner across all implementations.
+ // Notable details of interest:
+ //  add.ovf.un, sub.ovf.un, mul.ovf.un upcast without sign-extension (zero-extension).
+ //  div.un and rem.un upcast with sign-extension.
+ //  clt.un and cgt.un upcast with sign-extension.
+ //  bne.un, blt.un, ble.un, bgt.un, bge.un upcast without sign-extension (zero-extension).
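+ // As a concrete illustration (not itself a test case): on a 64-bit process the int32 value -2
+ // widens to 0x00000000FFFFFFFE under zero-extension but to 0xFFFFFFFFFFFFFFFE under sign-extension,
+ // which is why add.ovf.un below produces 0x00000000FFFFFFFF while add and add.ovf produce -1.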
+ [Fact]
+ [ActiveIssue("https://github.com/dotnet/runtime/issues/122398", TestRuntimes.Mono)]
+ public static void TestUpcastBehavior()
+ {
+ unchecked
+ {
+ //////////////////////////////////////////////////////////////////
+ /// Test scenarios where the first operand is I and the second is i32
+ /////////////////////////////////////////////////////////////////
+
+ // add: (nint)0x1 + -2
+ Assert.Equal(Environment.Is64BitProcess ? (nint)(-1) : (nint)(-1), Operator.add_I_i32((nint)0x1, -2));
+
+ // add.ovf.un: (nint)0x1 + -2
+ Assert.Equal(Environment.Is64BitProcess ? (nint)(ulong)0x00000000FFFFFFFF : (nint)(-1), Operator.add_ovf_un_I_i32((nint)0x1, -2));
+
+ // add.ovf: (nint)0x1 + -2
+ Assert.Equal(Environment.Is64BitProcess ? (nint)(-1) : (nint)(-1), Operator.add_ovf_I_i32((nint)0x1, -2));
+
+ // sub: -1 - -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)0 : (nint)0, Operator.sub_I_i32((nint)(-1), -1));
+
+ // sub.ovf: -1 - -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)0 : (nint)0, Operator.sub_ovf_I_i32((nint)(-1), -1));
+
+ // sub.ovf.un: -1 - -1
+ Assert.Equal(Environment.Is64BitProcess ? unchecked((nint)(ulong)0xFFFFFFFF00000000) : (nint)0, Operator.sub_ovf_un_I_i32((nint)(-1), -1));
+
+ // mul: (nint)0x2 * -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)(-2) : (nint)(-2), Operator.mul_I_i32((nint)0x2, -1));
+
+ // mul.ovf: (nint)0x2 * -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)(-2) : (nint)(-2), Operator.mul_ovf_I_i32((nint)0x2, -1));
+
+ // mul.ovf.un: (nint)0x2 * -1
+ if (!Environment.Is64BitProcess)
+ {
+ Assert.Throws<OverflowException>(() => Operator.mul_ovf_un_I_i32((nint)0x2, -1));
+ }
+ else
+ {
+ Assert.Equal((nint)(ulong)0x1FFFFFFFE, Operator.mul_ovf_un_I_i32((nint)0x2, -1));
+ }
+
+ // div: -1 / -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)1 : (nint)1, Operator.div_I_i32((nint)(-1), -1));
+
+ // div.un: -1 / -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)1 : (nint)1, Operator.div_un_I_i32((nint)(-1), -1));
+
+ // rem: -1 % -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)0 : (nint)0, Operator.rem_I_i32((nint)(-1), -1));
+
+ // rem.un: -1 % -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)0 : (nint)0, Operator.rem_un_I_i32((nint)(-1), -1));
+
+ // and: -1 & -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)(-1) : (nint)(-1), Operator.and_I_i32((nint)(-1), -1));
+
+ // or: 0 | -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)(-1) : (nint)(-1), Operator.or_I_i32((nint)0, -1));
+
+ // xor: -1 ^ -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)0 : (nint)0, Operator.xor_I_i32((nint)(-1), -1));
+
+ // ceq: -1 == -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)1 : (nint)1, Operator.ceq_I_i32((nint)(-1), -1));
+
+ // cgt: -2 > -3
+ Assert.Equal(Environment.Is64BitProcess ? (nint)1 : (nint)1, Operator.cgt_I_i32((nint)(-2), -3));
+
+ // cgt.un: -2 > -1 (unsigned)
+ Assert.Equal(Environment.Is64BitProcess ? (nint)0 : (nint)0, Operator.cgt_un_I_i32((nint)(-2), -1));
+
+ // clt: -1 < -2
+ Assert.Equal(Environment.Is64BitProcess ? (nint)0 : (nint)0, Operator.clt_I_i32((nint)(-1), -2));
+
+ // clt.un: -2 < -1 (unsigned)
+ Assert.Equal(Environment.Is64BitProcess ? (nint)1 : (nint)1, Operator.clt_un_I_i32((nint)(-2), -1));
+
+ // beq: -1 == -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)1 : (nint)1, Operator.beq_I_i32((nint)(-1), -1));
+
+ // bne.un: -1 != -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)1 : (nint)0, Operator.bne_un_I_i32((nint)(-1), -1));
+
+ // blt: -1 < -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)0 : (nint)0, Operator.blt_I_i32((nint)(-1), -1));
+
+ // blt.un: -2 < -1 (unsigned)
+ Assert.Equal(Environment.Is64BitProcess ? (nint)0 : (nint)1, Operator.blt_un_I_i32((nint)(-2), -1));
+
+ // ble: -1 <= -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)1 : (nint)1, Operator.ble_I_i32((nint)(-1), -1));
+
+ // ble.un: -1 <= -1 (unsigned)
+ Assert.Equal(Environment.Is64BitProcess ? (nint)0 : (nint)1, Operator.ble_un_I_i32((nint)(-1), -1));
+
+ // bgt: -1 > -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)0 : (nint)0, Operator.bgt_I_i32((nint)(-1), -1));
+
+ // bgt.un: -1 > -1 (unsigned)
+ Assert.Equal(Environment.Is64BitProcess ? (nint)1 : (nint)0, Operator.bgt_un_I_i32((nint)(-1), -1));
+
+ // bge: -1 >= -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)1 : (nint)1, Operator.bge_I_i32((nint)(-1), -1));
+
+ // bge.un: 0xFFFFFFFF >= -2 (unsigned, special case)
+ Assert.Equal(Environment.Is64BitProcess ? (nint)1 : (nint)1, Operator.bge_un_I_i32(unchecked((nint)(nuint)0xFFFFFFFF), -2));
+
+ //////////////////////////////////////////////////////////////////
+ /// Test scenarios where the first operand is i32 and the second is I
+ /////////////////////////////////////////////////////////////////
+
+ // add: -2 + 1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)(-1) : (nint)(-1), Operator.add_i32_I(-2, (nint)1));
+
+ // add.ovf.un: -2 + 1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)(ulong)0x00000000FFFFFFFF : (nint)(-1), Operator.add_ovf_un_i32_I(-2, (nint)1));
+
+ // add.ovf: -2 + 1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)(-1) : (nint)(-1), Operator.add_ovf_i32_I(-2, (nint)1));
+
+ // sub: -1 - -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)0 : (nint)0, Operator.sub_i32_I(-1, (nint)(-1)));
+
+ // sub.ovf: -1 - -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)0 : (nint)0, Operator.sub_ovf_i32_I(-1, (nint)(-1)));
+
+ // sub.ovf.un: -1 - 1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)(ulong)0xFFFFFFFE : (nint)(-2), Operator.sub_ovf_un_i32_I(-1, (nint)1));
+
+ // mul: -1 * 2
+ Assert.Equal(Environment.Is64BitProcess ? (nint)(-2) : (nint)(-2), Operator.mul_i32_I(-1, (nint)2));
+
+ // mul.ovf: -1 * 2
+ Assert.Equal(Environment.Is64BitProcess ? (nint)(-2) : (nint)(-2), Operator.mul_ovf_i32_I(-1, (nint)2));
+
+ // mul.ovf.un: -1 * 2
+ if (!Environment.Is64BitProcess)
+ {
+ Assert.Throws<OverflowException>(() => Operator.mul_ovf_un_i32_I(-1, (nint)2));
+ }
+ else
+ {
+ Assert.Equal((nint)(ulong)0x1FFFFFFFE, Operator.mul_ovf_un_i32_I(-1, (nint)2));
+ }
+
+ // div: -1 / -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)1 : (nint)1, Operator.div_i32_I(-1, (nint)(-1)));
+
+ // div.un: -1 / -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)1 : (nint)1, Operator.div_un_i32_I(-1, (nint)(-1)));
+
+ // rem: -1 % -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)0 : (nint)0, Operator.rem_i32_I(-1, (nint)(-1)));
+
+ // rem.un: -1 % -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)0 : (nint)0, Operator.rem_un_i32_I(-1, (nint)(-1)));
+
+ // and: -1 & -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)(-1) : (nint)(-1), Operator.and_i32_I(-1, (nint)(-1)));
+
+ // or: -1 | 0
+ Assert.Equal(Environment.Is64BitProcess ? (nint)(-1) : (nint)(-1), Operator.or_i32_I(-1, (nint)0));
+
+ // xor: -1 ^ -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)0 : (nint)0, Operator.xor_i32_I(-1, (nint)(-1)));
+
+ // ceq: -1 == -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)1 : (nint)1, Operator.ceq_i32_I(-1, (nint)(-1)));
+
+ // cgt: -2 > -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)0 : (nint)0, Operator.cgt_i32_I(-2, (nint)(-1)));
+
+ // cgt.un: -1 > -2 (unsigned)
+ Assert.Equal(Environment.Is64BitProcess ? (nint)1 : (nint)1, Operator.cgt_un_i32_I(-1, (nint)(-2)));
+
+ // clt: -1 < -2
+ Assert.Equal(Environment.Is64BitProcess ? (nint)0 : (nint)0, Operator.clt_i32_I(-1, (nint)(-2)));
+
+ // clt.un: -2 < -1 (unsigned)
+ Assert.Equal(Environment.Is64BitProcess ? (nint)1 : (nint)1, Operator.clt_un_i32_I(-2, (nint)(-1)));
+
+ // beq: -1 == -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)1 : (nint)1, Operator.beq_i32_I(-1, (nint)(-1)));
+
+ // bne.un: -1 != -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)1 : (nint)0, Operator.bne_un_i32_I(-1, (nint)(-1)));
+
+ // blt: -2 < -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)1 : (nint)1, Operator.blt_i32_I(-2, (nint)(-1)));
+
+ // blt.un: -1 < -2 (unsigned)
+ Assert.Equal(Environment.Is64BitProcess ? (nint)1 : (nint)0, Operator.blt_un_i32_I(-1, (nint)(-2)));
+
+ // ble: -1 <= -1
+ Assert.Equal(Environment.Is64BitProcess ? (nint)1 : (nint)1, Operator.ble_i32_I(-1, (nint)(-1)));
+
+ // ble.un: -1 <= -2 (unsigned)
+ Assert.Equal(Environment.Is64BitProcess ? (nint)1 : (nint)0, Operator.ble_un_i32_I(-1, (nint)(-2)));
+
+ // bgt: -1 > -2
+ Assert.Equal(Environment.Is64BitProcess ? (nint)1 : (nint)1, Operator.bgt_i32_I(-1, (nint)(-2)));
+
+ // bgt.un: -1 > -2 (unsigned)
+ Assert.Equal(Environment.Is64BitProcess ? (nint)0 : (nint)1, Operator.bgt_un_i32_I(-1, (nint)(-2)));
+
+ // bge: -1 >= -2
+ Assert.Equal(Environment.Is64BitProcess ? (nint)1 : (nint)1, Operator.bge_i32_I(-1, (nint)(-2)));
+
+ // bge.un: -1 >= -2 (unsigned)
+ Assert.Equal(Environment.Is64BitProcess ? (nint)0 : (nint)1, Operator.bge_un_i32_I(-1, (nint)(-2)));
+ }
+ }
+ }
+}
diff --git a/src/tests/JIT/Methodical/int64/unsigned/implicit_promotion_il.il b/src/tests/JIT/Methodical/int64/unsigned/implicit_promotion_il.il
new file mode 100644
index 00000000000000..00f98530c15434
--- /dev/null
+++ b/src/tests/JIT/Methodical/int64/unsigned/implicit_promotion_il.il
@@ -0,0 +1,828 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+
+
+
+
+
+
+.assembly extern mscorlib { }
+.assembly extern System.Console
+{
+ .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A )
+ .ver 4:0:0:0
+}
+.assembly implicit_promotion_il
+{
+}
+.assembly extern xunit.core {}
+// MVID: {964B45BB-9F5B-4A2B-9ECD-E062E2FE8E23}
+.namespace implicit_promotion
+{
+ .class public auto ansi Operator
+ extends ['mscorlib']System.Object
+ {
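+ // Each method applies a single IL operator to a mixed (native int, int32) or (int32, native int)
+ // operand pair and returns the result, making the implicit I4 -> native int promotion behavior
+ // directly observable to the managed test in implicit_promotion.cs.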
+ .method public hidebysig static native int
+ add_I_i32(native int a,
+ int32 b) il managed
+ {
+ // Code size 161 (0xa1)
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ add
+ ret
+ } // end of method 'Operator::add_I_i32'
+
+ .method public hidebysig static native int
+ add_i32_I(int32 a,
+ native int b) il managed
+ {
+ // Code size 161 (0xa1)
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ add
+ ret
+ } // end of method 'Operator::add_i32_I'
+
+ .method public hidebysig static native int
+ add_ovf_I_i32(native int a,
+ int32 b) il managed
+ {
+ // Code size 161 (0xa1)
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ add.ovf
+ ret
+ } // end of method 'Operator::add_ovf_I_i32'
+
+ .method public hidebysig static native int
+ add_ovf_i32_I(int32 a,
+ native int b) il managed
+ {
+ // Code size 161 (0xa1)
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ add.ovf
+ ret
+ } // end of method 'Operator::add_ovf_i32_I'
+
+ .method public hidebysig static native int
+ add_ovf_un_I_i32(native int a,
+ int32 b) il managed
+ {
+ // Code size 161 (0xa1)
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ add.ovf.un
+ ret
+ } // end of method 'Operator::add_ovf_un_I_i32'
+
+ .method public hidebysig static native int
+ add_ovf_un_i32_I(int32 a,
+ native int b) il managed
+ {
+ // Code size 161 (0xa1)
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ add.ovf.un
+ ret
+ } // end of method 'Operator::add_ovf_un_i32_I'
+
+ .method public hidebysig static native int
+ sub_I_i32(native int a,
+ int32 b) il managed
+ {
+ // Code size 161 (0xa1)
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ sub
+ ret
+ } // end of method 'Operator::sub_I_i32'
+
+ .method public hidebysig static native int
+ sub_i32_I(int32 a,
+ native int b) il managed
+ {
+ // Code size 161 (0xa1)
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ sub
+ ret
+ } // end of method 'Operator::sub_i32_I'
+
+ .method public hidebysig static native int
+ sub_ovf_I_i32(native int a,
+ int32 b) il managed
+ {
+ // Code size 161 (0xa1)
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ sub.ovf
+ ret
+ } // end of method 'Operator::sub_ovf_I_i32'
+
+ .method public hidebysig static native int
+ sub_ovf_i32_I(int32 a,
+ native int b) il managed
+ {
+ // Code size 161 (0xa1)
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ sub.ovf
+ ret
+ } // end of method 'Operator::sub_ovf_i32_I'
+
+ .method public hidebysig static native int
+ sub_ovf_un_I_i32(native int a,
+ int32 b) il managed
+ {
+ // Code size 161 (0xa1)
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ sub.ovf.un
+ ret
+ } // end of method 'Operator::sub_ovf_un_I_i32'
+
+ .method public hidebysig static native int
+ sub_ovf_un_i32_I(int32 a,
+ native int b) il managed
+ {
+ // Code size 161 (0xa1)
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ sub.ovf.un
+ ret
+ } // end of method 'Operator::sub_ovf_un_i32_I'
+
+ .method public hidebysig static native int
+ mul_I_i32(native int a,
+ int32 b) il managed
+ {
+ // Code size 161 (0xa1)
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ mul
+ ret
+ } // end of method 'Operator::mul_I_i32'
+
+ .method public hidebysig static native int
+ mul_i32_I(int32 a,
+ native int b) il managed
+ {
+ // Code size 161 (0xa1)
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ mul
+ ret
+ } // end of method 'Operator::mul_i32_I'
+
+ .method public hidebysig static native int
+ mul_ovf_I_i32(native int a,
+ int32 b) il managed
+ {
+ // Code size 161 (0xa1)
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ mul.ovf
+ ret
+ } // end of method 'Operator::mul_ovf_I_i32'
+
+ .method public hidebysig static native int
+ mul_ovf_i32_I(int32 a,
+ native int b) il managed
+ {
+ // Code size 161 (0xa1)
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ mul.ovf
+ ret
+ } // end of method 'Operator::mul_ovf_i32_I'
+
+ .method public hidebysig static native int
+ mul_ovf_un_I_i32(native int a,
+ int32 b) il managed
+ {
+ // Code size 161 (0xa1)
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ mul.ovf.un
+ ret
+ } // end of method 'Operator::mul_ovf_un_I_i32'
+
+ .method public hidebysig static native int
+ mul_ovf_un_i32_I(int32 a,
+ native int b) il managed
+ {
+ // Code size 161 (0xa1)
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ mul.ovf.un
+ ret
+ } // end of method 'Operator::mul_ovf_un_i32_I'
+
+ .method public hidebysig static native int
+ div_I_i32(native int a,
+ int32 b) il managed
+ {
+ // Code size 161 (0xa1)
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ div
+ ret
+ } // end of method 'Operator::div_I_i32'
+
+ .method public hidebysig static native int
+ div_i32_I(int32 a,
+ native int b) il managed
+ {
+ // Code size 161 (0xa1)
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ div
+ ret
+ } // end of method 'Operator::div_i32_I'
+
+ .method public hidebysig static native int
+ div_un_I_i32(native int a,
+ int32 b) il managed
+ {
+ // Code size 161 (0xa1)
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ div.un
+ ret
+ } // end of method 'Operator::div_un_I_i32'
+
+ .method public hidebysig static native int
+ div_un_i32_I(int32 a,
+ native int b) il managed
+ {
+ // Code size 161 (0xa1)
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ div.un
+ ret
+ } // end of method 'Operator::div_un_i32_I'
+
+ .method public hidebysig static native int
+ rem_I_i32(native int a,
+ int32 b) il managed
+ {
+ // Code size 161 (0xa1)
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ rem
+ ret
+ } // end of method 'Operator::rem_I_i32'
+
+ .method public hidebysig static native int
+ rem_i32_I(int32 a,
+ native int b) il managed
+ {
+ // Code size 161 (0xa1)
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ rem
+ ret
+ } // end of method 'Operator::rem_i32_I'
+
+ .method public hidebysig static native int
+ rem_un_I_i32(native int a,
+ int32 b) il managed
+ {
+ // Code size 161 (0xa1)
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ rem.un
+ ret
+ } // end of method 'Operator::rem_un_I_i32'
+
+ .method public hidebysig static native int
+ rem_un_i32_I(int32 a,
+ native int b) il managed
+ {
+ // Code size 161 (0xa1)
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ rem.un
+ ret
+ } // end of method 'Operator::rem_un_i32_I'
+
+ .method public hidebysig static native int
+ and_I_i32(native int a,
+ int32 b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ and
+ ret
+ } // end of method 'Operator::and_I_i32'
+
+ .method public hidebysig static native int
+ and_i32_I(int32 a,
+ native int b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ and
+ ret
+ } // end of method 'Operator::and_i32_I'
+
+ .method public hidebysig static native int
+ or_I_i32(native int a,
+ int32 b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ or
+ ret
+ } // end of method 'Operator::or_I_i32'
+
+ .method public hidebysig static native int
+ or_i32_I(int32 a,
+ native int b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ or
+ ret
+ } // end of method 'Operator::or_i32_I'
+
+ .method public hidebysig static native int
+ xor_I_i32(native int a,
+ int32 b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ xor
+ ret
+ } // end of method 'Operator::xor_I_i32'
+
+ .method public hidebysig static native int
+ xor_i32_I(int32 a,
+ native int b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ xor
+ ret
+ } // end of method 'Operator::xor_i32_I'
+
+ .method public hidebysig static native int
+ ceq_I_i32(native int a,
+ int32 b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ ceq
+ ret
+ } // end of method 'Operator::ceq_I_i32'
+
+ .method public hidebysig static native int
+ ceq_i32_I(int32 a,
+ native int b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ ceq
+ ret
+ } // end of method 'Operator::ceq_i32_I'
+
+ .method public hidebysig static native int
+ cgt_I_i32(native int a,
+ int32 b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ cgt
+ ret
+ } // end of method 'Operator::cgt_I_i32'
+
+ .method public hidebysig static native int
+ cgt_i32_I(int32 a,
+ native int b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ cgt
+ ret
+ } // end of method 'Operator::cgt_i32_I'
+
+ .method public hidebysig static native int
+ cgt_un_I_i32(native int a,
+ int32 b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ cgt.un
+ ret
+ } // end of method 'Operator::cgt_un_I_i32'
+
+ .method public hidebysig static native int
+ cgt_un_i32_I(int32 a,
+ native int b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ cgt.un
+ ret
+ } // end of method 'Operator::cgt_un_i32_I'
+
+ .method public hidebysig static native int
+ clt_I_i32(native int a,
+ int32 b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ clt
+ ret
+ } // end of method 'Operator::clt_I_i32'
+
+ .method public hidebysig static native int
+ clt_i32_I(int32 a,
+ native int b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ clt
+ ret
+ } // end of method 'Operator::clt_i32_I'
+
+ .method public hidebysig static native int
+ clt_un_I_i32(native int a,
+ int32 b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ clt.un
+ ret
+ } // end of method 'Operator::clt_un_I_i32'
+
+ .method public hidebysig static native int
+ clt_un_i32_I(int32 a,
+ native int b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ clt.un
+ ret
+ } // end of method 'Operator::clt_un_i32_I'
+
+ .method public hidebysig static native int
+ beq_I_i32(native int a,
+ int32 b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ beq TAKEN
+ ldc.i4.0
+ ret
+ TAKEN:
+ ldc.i4.1
+ ret
+ } // end of method 'Operator::beq_I_i32'
+
+ .method public hidebysig static native int
+ beq_i32_I(int32 a,
+ native int b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ beq TAKEN
+ ldc.i4.0
+ ret
+ TAKEN:
+ ldc.i4.1
+ ret
+ } // end of method 'Operator::beq_i32_I'
+
+ .method public hidebysig static native int
+ bne_un_I_i32(native int a,
+ int32 b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ bne.un TAKEN
+ ldc.i4.0
+ ret
+ TAKEN:
+ ldc.i4.1
+ ret
+ } // end of method 'Operator::bne_un_I_i32'
+
+ .method public hidebysig static native int
+ bne_un_i32_I(int32 a,
+ native int b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ bne.un TAKEN
+ ldc.i4.0
+ ret
+ TAKEN:
+ ldc.i4.1
+ ret
+ } // end of method 'Operator::bne_un_i32_I'
+
+ .method public hidebysig static native int
+ blt_I_i32(native int a,
+ int32 b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ blt TAKEN
+ ldc.i4.0
+ ret
+ TAKEN:
+ ldc.i4.1
+ ret
+ } // end of method 'Operator::blt_I_i32'
+
+ .method public hidebysig static native int
+ blt_i32_I(int32 a,
+ native int b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ blt TAKEN
+ ldc.i4.0
+ ret
+ TAKEN:
+ ldc.i4.1
+ ret
+ } // end of method 'Operator::blt_i32_I'
+
+ .method public hidebysig static native int
+ blt_un_I_i32(native int a,
+ int32 b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ blt.un TAKEN
+ ldc.i4.0
+ ret
+ TAKEN:
+ ldc.i4.1
+ ret
+ } // end of method 'Operator::blt_un_I_i32'
+
+ .method public hidebysig static native int
+ blt_un_i32_I(int32 a,
+ native int b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ blt.un TAKEN
+ ldc.i4.0
+ ret
+ TAKEN:
+ ldc.i4.1
+ ret
+ } // end of method 'Operator::blt_un_i32_I'
+
+ .method public hidebysig static native int
+ ble_I_i32(native int a,
+ int32 b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ ble TAKEN
+ ldc.i4.0
+ ret
+ TAKEN:
+ ldc.i4.1
+ ret
+ } // end of method 'Operator::ble_I_i32'
+
+ .method public hidebysig static native int
+ ble_i32_I(int32 a,
+ native int b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ ble TAKEN
+ ldc.i4.0
+ ret
+ TAKEN:
+ ldc.i4.1
+ ret
+ } // end of method 'Operator::ble_i32_I'
+
+ .method public hidebysig static native int
+ ble_un_I_i32(native int a,
+ int32 b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ ble.un TAKEN
+ ldc.i4.0
+ ret
+ TAKEN:
+ ldc.i4.1
+ ret
+ } // end of method 'Operator::ble_un_I_i32'
+
+ .method public hidebysig static native int
+ ble_un_i32_I(int32 a,
+ native int b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ ble.un TAKEN
+ ldc.i4.0
+ ret
+ TAKEN:
+ ldc.i4.1
+ ret
+ } // end of method 'Operator::ble_un_i32_I'
+
+ .method public hidebysig static native int
+ bgt_I_i32(native int a,
+ int32 b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ bgt TAKEN
+ ldc.i4.0
+ ret
+ TAKEN:
+ ldc.i4.1
+ ret
+ } // end of method 'Operator::bgt_I_i32'
+
+ .method public hidebysig static native int
+ bgt_i32_I(int32 a,
+ native int b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ bgt TAKEN
+ ldc.i4.0
+ ret
+ TAKEN:
+ ldc.i4.1
+ ret
+ } // end of method 'Operator::bgt_i32_I'
+
+ .method public hidebysig static native int
+ bgt_un_I_i32(native int a,
+ int32 b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ bgt.un TAKEN
+ ldc.i4.0
+ ret
+ TAKEN:
+ ldc.i4.1
+ ret
+ } // end of method 'Operator::bgt_un_I_i32'
+
+ .method public hidebysig static native int
+ bgt_un_i32_I(int32 a,
+ native int b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ bgt.un TAKEN
+ ldc.i4.0
+ ret
+ TAKEN:
+ ldc.i4.1
+ ret
+ } // end of method 'Operator::bgt_un_i32_I'
+
+ .method public hidebysig static native int
+ bge_I_i32(native int a,
+ int32 b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ bge TAKEN
+ ldc.i4.0
+ ret
+ TAKEN:
+ ldc.i4.1
+ ret
+ } // end of method 'Operator::bge_I_i32'
+
+ .method public hidebysig static native int
+ bge_i32_I(int32 a,
+ native int b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ bge TAKEN
+ ldc.i4.0
+ ret
+ TAKEN:
+ ldc.i4.1
+ ret
+ } // end of method 'Operator::bge_i32_I'
+
+ .method public hidebysig static native int
+ bge_un_I_i32(native int a,
+ int32 b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ bge.un TAKEN
+ ldc.i4.0
+ ret
+ TAKEN:
+ ldc.i4.1
+ ret
+ } // end of method 'Operator::bge_un_I_i32'
+
+ .method public hidebysig static native int
+ bge_un_i32_I(int32 a,
+ native int b) il managed
+ {
+ .maxstack 2
+ ldarg.0
+ ldarg.1
+ bge.un TAKEN
+ ldc.i4.0
+ ret
+ TAKEN:
+ ldc.i4.1
+ ret
+ } // end of method 'Operator::bge_un_i32_I'
+
+ .method public hidebysig specialname rtspecialname
+ instance void .ctor() il managed
+ {
+ // Code size 7 (0x7)
+ .maxstack 8
+ IL_0000: ldarg.0
+ IL_0001: call instance void ['mscorlib']System.Object::.ctor()
+ IL_0006: ret
+ } // end of method 'Operator::.ctor'
+
+ } // end of class 'Operator'
+
+} // end of namespace 'implicit_promotion'
+
+//*********** DISASSEMBLY COMPLETE ***********************
diff --git a/src/tests/JIT/Methodical/int64/unsigned/implicit_promotion_il.ilproj b/src/tests/JIT/Methodical/int64/unsigned/implicit_promotion_il.ilproj
new file mode 100644
index 00000000000000..8a3f6ca933bfaf
--- /dev/null
+++ b/src/tests/JIT/Methodical/int64/unsigned/implicit_promotion_il.ilproj
@@ -0,0 +1,10 @@
+
+
+
+
+ PdbOnly
+
+
+
+
+